Mirror of https://github.com/k3s-io/k3s.git (synced 2024-06-07 19:41:36 +00:00)

Merge pull request #5079 from manuelbuil/michalsPR

netpol: Use kube-router as a library

Commit 773c2a4184
@@ -33,7 +33,7 @@ K3s bundles the following technologies together into a single cohesive distribution:
 * [Metrics Server](https://github.com/kubernetes-sigs/metrics-server)
 * [Traefik](https://containo.us/traefik/) for ingress
 * [Klipper-lb](https://github.com/k3s-io/klipper-lb) as an embedded service load balancer provider
-* [Kube-router](https://www.kube-router.io/) for network policy
+* [Kube-router](https://www.kube-router.io/) netpol controller for network policy
 * [Helm-controller](https://github.com/k3s-io/helm-controller) to allow for CRD-driven deployment of helm manifests
 * [Kine](https://github.com/k3s-io/kine) as a datastore shim that allows etcd to be replaced with other databases
 * [Local-path-provisioner](https://github.com/rancher/local-path-provisioner) for provisioning volumes using local storage
go.mod (3 changes)

@@ -64,11 +64,11 @@ replace (

require (
    github.com/Microsoft/hcsshim v0.9.2
    github.com/cloudnativelabs/kube-router v1.3.2
    github.com/containerd/cgroups v1.0.1
    github.com/containerd/containerd v1.6.0-beta.2.0.20211117185425-a776a27af54a
    github.com/containerd/fuse-overlayfs-snapshotter v1.0.4
    github.com/containerd/stargz-snapshotter v0.10.1
    github.com/coreos/go-iptables v0.5.0
    github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f
    github.com/docker/docker v20.10.10+incompatible
    github.com/erikdubbelboer/gspt v0.0.0-20190125194910-e68493906b83
@@ -127,7 +127,6 @@ require (
    k8s.io/controller-manager v0.23.3 // indirect
    k8s.io/cri-api v0.23.3
    k8s.io/klog v1.0.0
    k8s.io/klog/v2 v2.30.0
    k8s.io/kubectl v0.23.3
    k8s.io/kubernetes v1.23.3
    k8s.io/utils v0.0.0-20211116205334-6203023598ed
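The practical effect of this hunk is that kube-router is required directly at v1.3.2, so its controller packages can be consumed as a library instead of as copied-in source. A minimal, hypothetical go.mod for a module that consumes kube-router the same way (the module path and go directive are illustrative assumptions; only the kube-router requirement is taken from the hunk above):

module example.com/netpol-consumer // hypothetical module path, for illustration only

go 1.17 // assumed toolchain version, not taken from the diff

require github.com/cloudnativelabs/kube-router v1.3.2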
37
go.sum
37
go.sum
@ -127,8 +127,9 @@ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:l
|
||||
github.com/auth0/go-jwt-middleware v1.0.1/go.mod h1:YSeUX3z6+TF2H+7padiEqNJ73Zy9vXW72U//IgN0BIM=
|
||||
github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
|
||||
github.com/aws/aws-sdk-go v1.35.24/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k=
|
||||
github.com/aws/aws-sdk-go v1.38.49 h1:E31vxjCe6a5I+mJLmUGaZobiWmg9KdWaud9IfceYeYQ=
|
||||
github.com/aws/aws-sdk-go v1.38.49/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
||||
github.com/aws/aws-sdk-go v1.38.70 h1:EGHVUQzHIxQDF9LwQU22yE9bJd1HuBAWpJYSEnxnnhc=
|
||||
github.com/aws/aws-sdk-go v1.38.70/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
||||
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
|
||||
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
|
||||
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
@ -171,6 +172,8 @@ github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJ
|
||||
github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
|
||||
github.com/cilium/ebpf v0.7.0 h1:1k/q3ATgxSXRdrmPfH8d7YK0GfqVsEKZAX9dQZvs56k=
|
||||
github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
|
||||
github.com/cloudnativelabs/kube-router v1.3.2 h1:OBnFEP8IIIiWDAWd25QXDtyXDQi6GxR0DHOP+EXcpNI=
|
||||
github.com/cloudnativelabs/kube-router v1.3.2/go.mod h1:bu7wbMiNX44Rx7mSCcvgNot2jVHuaBDu/z5ygcEtAJY=
|
||||
github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313/go.mod h1:P1wt9Z3DP8O6W3rvwCt0REIlshg1InHImaLW0t3ObY0=
|
||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
@ -232,8 +235,9 @@ github.com/coredns/corefile-migration v1.0.14/go.mod h1:XnhgULOEouimnzgn0t4WPuFD
|
||||
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-iptables v0.5.0 h1:mw6SAibtHKZcNzAsOxjoHIG0gy5YFHhypWSSNc6EjbQ=
|
||||
github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
|
||||
github.com/coreos/go-iptables v0.6.0 h1:is9qnZMPYjLd8LYqmm/qlE+wwEgJIkTYdhV3rfZo4jk=
|
||||
github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
|
||||
github.com/coreos/go-oidc v2.1.0+incompatible h1:sdJrfw8akMnCuUlaZU3tE/uYXFgfqom8DBE9so9EBsM=
|
||||
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
@ -268,6 +272,7 @@ github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd h1:uVsMph
|
||||
github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE=
|
||||
github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dgryski/go-farm v0.0.0-20171119141306-ac7624ea8da3/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY=
|
||||
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
|
||||
@ -294,6 +299,8 @@ github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:Htrtb
|
||||
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dustmop/soup v1.1.2-0.20190516214245-38228baa104e/go.mod h1:CgNC6SGbT+Xb8wGGvzilttZL1mc5sQ/5KkcxsZttMIk=
|
||||
github.com/eapache/channels v1.1.0/go.mod h1:jMm2qB5Ubtg9zLd+inMZd2/NUvXgzmWXsDaLyQIGfH0=
|
||||
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
||||
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc=
|
||||
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
|
||||
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
@ -328,6 +335,7 @@ github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoD
|
||||
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
|
||||
github.com/frankban/quicktest v1.12.1 h1:P6vQcHwZYgVGIpUzKB5DXzkEeYJppJOStPLuh9aB89c=
|
||||
github.com/frankban/quicktest v1.12.1/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU=
|
||||
github.com/fsnotify/fsnotify v1.4.2/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
@ -414,6 +422,7 @@ github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfC
|
||||
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
|
||||
github.com/go-test/deep v1.0.6/go.mod h1:QV8Hv/iy04NyLBxAdO9njL0iVPN1S4d/A3NVv1V36o8=
|
||||
github.com/goccy/go-json v0.7.10 h1:ulhbuNe1JqE68nMRXXTJRrUu0uhouf0VevLINxQq4Ec=
|
||||
github.com/goccy/go-json v0.7.10/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||
github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c h1:RBUpb2b14UnmRHNd2uHz20ZHLDK+SW5Us/vWF5IHRaY=
|
||||
@ -570,6 +579,7 @@ github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/hashicorp/hcl v0.0.0-20170509225359-392dba7d905e/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
|
||||
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
|
||||
@ -590,6 +600,7 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt
|
||||
github.com/insomniacslk/dhcp v0.0.0-20210120172423-cc9239ac6294/go.mod h1:TKl4jN3Voofo4UJIicyNhWGp/nlQqQkFxmwIFTvBkKI=
|
||||
github.com/ishidawataru/sctp v0.0.0-20190723014705-7c296d48a2b5/go.mod h1:DM4VvS+hD/kDi1U1QsX2fnZowwBhqD0Dk3bRPKF/Oc8=
|
||||
github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
|
||||
github.com/jessevdk/go-flags v1.3.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
|
||||
@ -627,6 +638,7 @@ github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfV
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
|
||||
github.com/k-sone/critbitgo v1.3.1-0.20191024122315-48c9e1530131/go.mod h1:7E6pyoyADnFxlUBEKcnfS49b7SUAQGMK+OAp/UQvo0s=
|
||||
github.com/k3s-io/cadvisor v0.43.0-k3s1 h1:bc51UHUKERI+X4DjgHmKq9Ong0KT3TnFVmvUIkFTCoU=
|
||||
github.com/k3s-io/cadvisor v0.43.0-k3s1/go.mod h1:+RdMSbc3FVr5NYCD2dOEJy/LI0jYJ/0xJXkzWXEyiFQ=
|
||||
github.com/k3s-io/containerd v1.5.9-k3s1 h1:RhXDbNgl4yZUzuJ3X8gM9rmcH7z31/D1f63xzRnZRxc=
|
||||
@ -736,6 +748,7 @@ github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9
|
||||
github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY=
|
||||
github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc=
|
||||
github.com/lpabon/godbc v0.1.1/go.mod h1:Jo9QV0cf3U6jZABgiJ2skINAXb9j8m51r07g4KI92ZA=
|
||||
github.com/magiconair/properties v1.7.3/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
|
||||
@ -808,6 +821,7 @@ github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUb
|
||||
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
|
||||
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
|
||||
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v0.0.0-20170523030023-d0303fe80992/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
|
||||
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
@ -877,6 +891,7 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J
|
||||
github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
|
||||
github.com/onsi/gomega v1.11.0/go.mod h1:azGKhqFUon9Vuj0YmTfLSmx0FUwqXYSTl5re8lQLTUg=
|
||||
github.com/onsi/gomega v1.14.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
|
||||
github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE=
|
||||
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
|
||||
@ -895,6 +910,7 @@ github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xA
|
||||
github.com/opencontainers/selinux v1.8.3 h1:tzZR7AuKB5gU1+53uBkoG4XdIFGZzvJTOVoNbRQI8/4=
|
||||
github.com/opencontainers/selinux v1.8.3/go.mod h1:HTvjPFoGMbpQsG886e3lQwnsRWtE4TC1OF3OUvG9FAo=
|
||||
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/osrg/gobgp v0.0.0-20210801043420-9e48a36ed97c/go.mod h1:aNi0T2X6FSkl1evOifmJUsdxiQ1AQkiV7fIEtLIVv/U=
|
||||
github.com/otiai10/copy v1.6.0 h1:IinKAryFFuPONZ7cm6T6E2QX/vcJwSnlaA5lfoaXIiQ=
|
||||
github.com/otiai10/copy v1.6.0/go.mod h1:XWfuS3CrI0R6IE0FbgHsEazaXO8G0LpMp9o8tos0x4E=
|
||||
github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
|
||||
@ -906,6 +922,8 @@ github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FI
|
||||
github.com/paulmach/orb v0.1.3/go.mod h1:VFlX/8C+IQ1p6FTRRKzKoOPJnvEtA5G0Veuqwbu//Vk=
|
||||
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
|
||||
github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
|
||||
github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo=
|
||||
github.com/pelletier/go-toml v1.0.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
|
||||
github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM=
|
||||
@ -989,6 +1007,7 @@ github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8 h1:2c1EFnZHIPCW8qKWgHMH/fX2PkSabFc5mrVzfUNdg5U=
|
||||
github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
|
||||
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
@ -998,6 +1017,7 @@ github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAm
|
||||
github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
|
||||
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/sirupsen/logrus v0.0.0-20170713114250-a3f95b5c4235/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
|
||||
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
||||
@ -1017,26 +1037,32 @@ github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
|
||||
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
|
||||
github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8/go.mod h1:P5HUIBuIWKbyjl083/loAegFkfbFNx5i2qEP4CNbm7E=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v0.0.0-20170217164146-9be650865eab/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
|
||||
github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=
|
||||
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
|
||||
github.com/spf13/cast v1.1.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v0.0.0-20170731170427-b26b538f6930/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
|
||||
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
|
||||
github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw=
|
||||
github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
|
||||
github.com/spf13/jwalterweatherman v0.0.0-20170523133247-0efa5202c046/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
|
||||
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.0/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.0.0/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM=
|
||||
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
|
||||
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
|
||||
github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
|
||||
@ -1054,6 +1080,7 @@ github.com/stretchr/testify v1.2.3-0.20181224173747-660f15d67dbb/go.mod h1:a8OnR
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
@ -1084,12 +1111,15 @@ github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKn
|
||||
github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME=
|
||||
github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
|
||||
github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
|
||||
github.com/vishvananda/netlink v0.0.0-20170802012344-a95659537721/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
|
||||
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
|
||||
github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852 h1:cPXZWzzG0NllBLdjWoD1nDfaqu98YMv+OneaKc8sPOA=
|
||||
github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
|
||||
github.com/vishvananda/netns v0.0.0-20170707011535-86bef332bfc3/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
|
||||
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
|
||||
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae h1:4hwBBUfQCFe3Cym0ZtKyq7L16eZUtYKs+BaHDN6mAns=
|
||||
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
|
||||
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f h1:p4VB7kIXpOQvVn1ZaTIVp+3vuYAXFe3OJEvjbUYJLaA=
|
||||
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
|
||||
github.com/vmware/govmomi v0.20.3 h1:gpw/0Ku+6RgF3jsi7fnCLmlcikBHfKBCUcu1qgc16OU=
|
||||
github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
|
||||
@ -1461,6 +1491,7 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200121175148-a6ecf24a6d71/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200601152816-913338de1bd2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
@@ -1,66 +0,0 @@
// Apache License v2.0 (copyright Cloud Native Labs & Rancher Labs)
// - modified from https://github.com/cloudnativelabs/kube-router/blob/73b1b03b32c5755b240f6c077bb097abe3888314/pkg/controllers/netpol/namespace.go

// +build !windows

package netpol

import (
    "reflect"

    api "k8s.io/api/core/v1"
    "k8s.io/client-go/tools/cache"
    "k8s.io/klog/v2"
)

func (npc *NetworkPolicyController) newNamespaceEventHandler() cache.ResourceEventHandler {
    return cache.ResourceEventHandlerFuncs{
        AddFunc: func(obj interface{}) {
            npc.handleNamespaceAdd(obj.(*api.Namespace))
        },
        UpdateFunc: func(oldObj, newObj interface{}) {
            npc.handleNamespaceUpdate(oldObj.(*api.Namespace), newObj.(*api.Namespace))
        },
        DeleteFunc: func(obj interface{}) {
            switch obj := obj.(type) {
            case *api.Namespace:
                npc.handleNamespaceDelete(obj)
                return
            case cache.DeletedFinalStateUnknown:
                if namespace, ok := obj.Obj.(*api.Namespace); ok {
                    npc.handleNamespaceDelete(namespace)
                    return
                }
            default:
                klog.Errorf("unexpected object type: %v", obj)
            }
        },
    }
}

func (npc *NetworkPolicyController) handleNamespaceAdd(obj *api.Namespace) {
    if obj.Labels == nil {
        return
    }
    klog.V(2).Infof("Received update for namespace: %s", obj.Name)

    npc.RequestFullSync()
}

func (npc *NetworkPolicyController) handleNamespaceUpdate(oldObj, newObj *api.Namespace) {
    if reflect.DeepEqual(oldObj.Labels, newObj.Labels) {
        return
    }
    klog.V(2).Infof("Received update for namespace: %s", newObj.Name)

    npc.RequestFullSync()
}

func (npc *NetworkPolicyController) handleNamespaceDelete(obj *api.Namespace) {
    if obj.Labels == nil {
        return
    }
    klog.V(2).Infof("Received namespace: %s delete event", obj.Name)

    npc.RequestFullSync()
}
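The deleted file above is the copied-and-modified namespace handler that the library version of the controller now provides. For orientation, here is a minimal, self-contained sketch of the same client-go pattern it used: registering ResourceEventHandlerFuncs on a namespace informer. The fake clientset and the log-only handler bodies are illustrative assumptions; the real handlers simply requested a full policy sync.

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
    "k8s.io/client-go/informers"
    "k8s.io/client-go/kubernetes/fake"
    "k8s.io/client-go/tools/cache"
)

func main() {
    // A fake clientset stands in for a real cluster connection.
    client := fake.NewSimpleClientset()
    factory := informers.NewSharedInformerFactory(client, 0)
    nsInformer := factory.Core().V1().Namespaces().Informer()

    // Same shape as newNamespaceEventHandler above: type-assert the object,
    // then react to add/update/delete events.
    nsInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
        AddFunc: func(obj interface{}) {
            fmt.Println("namespace added:", obj.(*corev1.Namespace).Name)
        },
        UpdateFunc: func(oldObj, newObj interface{}) {
            fmt.Println("namespace updated:", newObj.(*corev1.Namespace).Name)
        },
        DeleteFunc: func(obj interface{}) {
            // Tombstone (DeletedFinalStateUnknown) handling omitted for brevity.
            if ns, ok := obj.(*corev1.Namespace); ok {
                fmt.Println("namespace deleted:", ns.Name)
            }
        },
    })

    stopCh := make(chan struct{})
    defer close(stopCh)
    factory.Start(stopCh)
    factory.WaitForCacheSync(stopCh)
    // With a real clientset, the handler funcs fire as namespaces change.
}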
@@ -1,15 +1,20 @@
// Apache License v2.0 (copyright Cloud Native Labs & Rancher Labs)
// - modified from https://github.com/cloudnativelabs/kube-router/blob/73b1b03b32c5755b240f6c077bb097abe3888314/pkg/controllers/netpol.go

//go:build !windows
// +build !windows

package netpol

import (
    "context"
    "strings"
    "sync"

    "github.com/rancher/k3s/pkg/agent/netpol/utils"
    "github.com/cloudnativelabs/kube-router/pkg/controllers/netpol"
    "github.com/cloudnativelabs/kube-router/pkg/healthcheck"
    "github.com/cloudnativelabs/kube-router/pkg/options"
    "github.com/cloudnativelabs/kube-router/pkg/utils"
    "github.com/rancher/k3s/pkg/daemons/config"
    "github.com/sirupsen/logrus"
    "k8s.io/client-go/informers"
@@ -20,8 +25,8 @@ import (
// Run creates and starts a new instance of the kube-router network policy controller
// The code in this function is cribbed from the upstream controller at:
// https://github.com/cloudnativelabs/kube-router/blob/ee9f6d890d10609284098229fa1e283ab5d83b93/pkg/cmd/kube-router.go#L78
// The NewNetworkPolicyController function has also been modified to use the k3s config.Node struct instead of KubeRouter's
// CLI configuration, eliminate use of a WaitGroup for shutdown sequencing, and drop Prometheus metrics support.
// It converts the k3s config.Node into kube-router configuration (only the
// subset of options needed for netpol controller).
func Run(ctx context.Context, nodeConfig *config.Node) error {
    set, err := utils.NewIPSet(false)
    if err != nil {
@@ -44,7 +49,21 @@ func Run(ctx context.Context, nodeConfig *config.Node) error {
        return err
    }

    krConfig := options.NewKubeRouterConfig()
    krConfig.ClusterIPCIDR = nodeConfig.AgentConfig.ServiceCIDR.String()
    krConfig.NodePortRange = strings.ReplaceAll(nodeConfig.AgentConfig.ServiceNodePortRange.String(), "-", ":")
    krConfig.HostnameOverride = nodeConfig.AgentConfig.NodeName
    krConfig.MetricsEnabled = false
    krConfig.RunFirewall = true
    krConfig.RunRouter = false
    krConfig.RunServiceProxy = false

    stopCh := ctx.Done()
    healthCh := make(chan *healthcheck.ControllerHeartbeat)

    // We don't use this WaitGroup, but kube-router components require it.
    var wg sync.WaitGroup

    informerFactory := informers.NewSharedInformerFactory(client, 0)
    podInformer := informerFactory.Core().V1().Pods().Informer()
    nsInformer := informerFactory.Core().V1().Namespaces().Informer()
@@ -52,7 +71,15 @@ func Run(ctx context.Context, nodeConfig *config.Node) error {
    informerFactory.Start(stopCh)
    informerFactory.WaitForCacheSync(stopCh)

    npc, err := NewNetworkPolicyController(client, nodeConfig, podInformer, npInformer, nsInformer, &sync.Mutex{})
    // Start kube-router healthcheck server. Netpol requires it
    hc, err := healthcheck.NewHealthController(krConfig)
    if err != nil {
        return err
    }
    wg.Add(1)
    go hc.RunCheck(healthCh, stopCh, &wg)

    npc, err := netpol.NewNetworkPolicyController(client, krConfig, podInformer, npInformer, nsInformer, &sync.Mutex{})
    if err != nil {
        return err
    }
@@ -61,7 +88,9 @@ func Run(ctx context.Context, nodeConfig *config.Node) error {
    nsInformer.AddEventHandler(npc.NamespaceEventHandler)
    npInformer.AddEventHandler(npc.NetworkPolicyEventHandler)

    go npc.Run(stopCh)
    wg.Add(1)
    logrus.Info("Starting the netpol controller")
    go npc.Run(healthCh, stopCh, &wg)

    return nil
}
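Pulling the hunks above together: the new Run wiring builds a kube-router option struct from the k3s node config, starts the health controller that the netpol controller heartbeats against, and then runs the upstream NetworkPolicyController. Below is a condensed, self-contained sketch of that flow under stated assumptions: the function and package names are illustrative, error handling around the ipset bootstrap is omitted, and the pod-informer handler registration is assumed from the unchanged context of the diff rather than shown in it.

// Condensed, illustrative sketch of the library wiring introduced above
// (not a drop-in replacement for the real Run function in the diff).
package netpolsketch

import (
    "strings"
    "sync"

    "github.com/cloudnativelabs/kube-router/pkg/controllers/netpol"
    "github.com/cloudnativelabs/kube-router/pkg/healthcheck"
    "github.com/cloudnativelabs/kube-router/pkg/options"
    "github.com/rancher/k3s/pkg/daemons/config"
    "k8s.io/client-go/informers"
    "k8s.io/client-go/kubernetes"
)

func runNetpol(nodeConfig *config.Node, client kubernetes.Interface, stopCh <-chan struct{}) error {
    // Translate only the subset of k3s node configuration that the netpol
    // controller needs into kube-router's option struct.
    krConfig := options.NewKubeRouterConfig()
    krConfig.ClusterIPCIDR = nodeConfig.AgentConfig.ServiceCIDR.String()
    krConfig.NodePortRange = strings.ReplaceAll(nodeConfig.AgentConfig.ServiceNodePortRange.String(), "-", ":")
    krConfig.HostnameOverride = nodeConfig.AgentConfig.NodeName
    krConfig.MetricsEnabled = false
    krConfig.RunFirewall = true      // network policy enforcement only
    krConfig.RunRouter = false       // no BGP routing
    krConfig.RunServiceProxy = false // no service proxy

    healthCh := make(chan *healthcheck.ControllerHeartbeat)
    var wg sync.WaitGroup // unused by k3s, but kube-router components require it

    informerFactory := informers.NewSharedInformerFactory(client, 0)
    podInformer := informerFactory.Core().V1().Pods().Informer()
    nsInformer := informerFactory.Core().V1().Namespaces().Informer()
    npInformer := informerFactory.Networking().V1().NetworkPolicies().Informer()
    informerFactory.Start(stopCh)
    informerFactory.WaitForCacheSync(stopCh)

    // The netpol controller reports heartbeats, so the health controller must run.
    hc, err := healthcheck.NewHealthController(krConfig)
    if err != nil {
        return err
    }
    wg.Add(1)
    go hc.RunCheck(healthCh, stopCh, &wg)

    npc, err := netpol.NewNetworkPolicyController(client, krConfig, podInformer, npInformer, nsInformer, &sync.Mutex{})
    if err != nil {
        return err
    }
    podInformer.AddEventHandler(npc.PodEventHandler) // assumed from the diff's unchanged context
    nsInformer.AddEventHandler(npc.NamespaceEventHandler)
    npInformer.AddEventHandler(npc.NetworkPolicyEventHandler)

    wg.Add(1)
    go npc.Run(healthCh, stopCh, &wg)
    return nil
}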
@ -1,664 +0,0 @@
|
||||
// Apache License v2.0 (copyright Cloud Native Labs & Rancher Labs)
|
||||
// - modified from https://github.com/cloudnativelabs/kube-router/blob/73b1b03b32c5755b240f6c077bb097abe3888314/pkg/controllers/netpol/network_policy_controller.go
|
||||
|
||||
// +build !windows
|
||||
|
||||
package netpol
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"encoding/base32"
|
||||
"fmt"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/go-iptables/iptables"
|
||||
"github.com/rancher/k3s/pkg/agent/netpol/utils"
|
||||
"github.com/rancher/k3s/pkg/daemons/config"
|
||||
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
const (
|
||||
kubePodFirewallChainPrefix = "KUBE-POD-FW-"
|
||||
kubeNetworkPolicyChainPrefix = "KUBE-NWPLCY-"
|
||||
kubeSourceIPSetPrefix = "KUBE-SRC-"
|
||||
kubeDestinationIPSetPrefix = "KUBE-DST-"
|
||||
kubeInputChainName = "KUBE-ROUTER-INPUT"
|
||||
kubeForwardChainName = "KUBE-ROUTER-FORWARD"
|
||||
kubeOutputChainName = "KUBE-ROUTER-OUTPUT"
|
||||
kubeDefaultNetpolChain = "KUBE-NWPLCY-DEFAULT"
|
||||
defaultSyncPeriod = 5 * time.Minute
|
||||
)
|
||||
|
||||
// Network policy controller provides both ingress and egress filtering for the pods as per the defined network
|
||||
// policies. Two different types of iptables chains are used. Each pod running on the node which either
|
||||
// requires ingress or egress filtering gets a pod specific chains. Each network policy has a iptables chain, which
|
||||
// has rules expressed through ipsets matching source and destination pod ip's. In the FORWARD chain of the
|
||||
// filter table a rule is added to jump the traffic originating (in case of egress network policy) from the pod
|
||||
// or destined (in case of ingress network policy) to the pod specific iptables chain. Each
|
||||
// pod specific iptables chain has rules to jump to the network polices chains, that pod matches. So packet
|
||||
// originating/destined from/to pod goes through filter table's, FORWARD chain, followed by pod specific chain,
|
||||
// followed by one or more network policy chains, till there is a match which will accept the packet, or gets
|
||||
// dropped by the rule in the pod chain, if there is no match.
|
||||
|
||||
// NetworkPolicyController struct to hold information required by NetworkPolicyController
|
||||
type NetworkPolicyController struct {
|
||||
nodeIP net.IP
|
||||
nodeHostName string
|
||||
serviceClusterIPRange net.IPNet
|
||||
serviceExternalIPRanges []net.IPNet
|
||||
serviceNodePortRange string
|
||||
mu sync.Mutex
|
||||
syncPeriod time.Duration
|
||||
fullSyncRequestChan chan struct{}
|
||||
ipsetMutex *sync.Mutex
|
||||
|
||||
ipSetHandler *utils.IPSet
|
||||
|
||||
podLister cache.Indexer
|
||||
npLister cache.Indexer
|
||||
nsLister cache.Indexer
|
||||
|
||||
PodEventHandler cache.ResourceEventHandler
|
||||
NamespaceEventHandler cache.ResourceEventHandler
|
||||
NetworkPolicyEventHandler cache.ResourceEventHandler
|
||||
|
||||
filterTableRules bytes.Buffer
|
||||
}
|
||||
|
||||
// internal structure to represent a network policy
|
||||
type networkPolicyInfo struct {
|
||||
name string
|
||||
namespace string
|
||||
podSelector labels.Selector
|
||||
|
||||
// set of pods matching network policy spec podselector label selector
|
||||
targetPods map[string]podInfo
|
||||
|
||||
// whitelist ingress rules from the network policy spec
|
||||
ingressRules []ingressRule
|
||||
|
||||
// whitelist egress rules from the network policy spec
|
||||
egressRules []egressRule
|
||||
|
||||
// policy type "ingress" or "egress" or "both" as defined by PolicyType in the spec
|
||||
policyType string
|
||||
}
|
||||
|
||||
// internal structure to represent Pod
|
||||
type podInfo struct {
|
||||
ip string
|
||||
name string
|
||||
namespace string
|
||||
labels map[string]string
|
||||
}
|
||||
|
||||
// internal structure to represent NetworkPolicyIngressRule in the spec
|
||||
type ingressRule struct {
|
||||
matchAllPorts bool
|
||||
ports []protocolAndPort
|
||||
namedPorts []endPoints
|
||||
matchAllSource bool
|
||||
srcPods []podInfo
|
||||
srcIPBlocks [][]string
|
||||
}
|
||||
|
||||
// internal structure to represent NetworkPolicyEgressRule in the spec
|
||||
type egressRule struct {
|
||||
matchAllPorts bool
|
||||
ports []protocolAndPort
|
||||
namedPorts []endPoints
|
||||
matchAllDestinations bool
|
||||
dstPods []podInfo
|
||||
dstIPBlocks [][]string
|
||||
}
|
||||
|
||||
type protocolAndPort struct {
|
||||
protocol string
|
||||
port string
|
||||
endport string
|
||||
}
|
||||
|
||||
type endPoints struct {
|
||||
ips []string
|
||||
protocolAndPort
|
||||
}
|
||||
|
||||
type numericPort2eps map[string]*endPoints
|
||||
type protocol2eps map[string]numericPort2eps
|
||||
type namedPort2eps map[string]protocol2eps
|
||||
|
||||
// Run runs forever till we receive notification on stopCh
|
||||
func (npc *NetworkPolicyController) Run(stopCh <-chan struct{}) {
|
||||
t := time.NewTicker(npc.syncPeriod)
|
||||
defer t.Stop()
|
||||
|
||||
klog.Info("Starting network policy controller")
|
||||
|
||||
// setup kube-router specific top level custom chains (KUBE-ROUTER-INPUT, KUBE-ROUTER-FORWARD, KUBE-ROUTER-OUTPUT)
|
||||
npc.ensureTopLevelChains()
|
||||
|
||||
// setup default network policy chain that is applied to traffic from/to the pods that does not match any network policy
|
||||
npc.ensureDefaultNetworkPolicyChain()
|
||||
|
||||
// Full syncs of the network policy controller take a lot of time and can only be processed one at a time,
|
||||
// therefore, we start it in it's own goroutine and request a sync through a single item channel
|
||||
klog.Info("Starting network policy controller full sync goroutine")
|
||||
go func(fullSyncRequest <-chan struct{}, stopCh <-chan struct{}) {
|
||||
for {
|
||||
// Add an additional non-blocking select to ensure that if the stopCh channel is closed it is handled first
|
||||
select {
|
||||
case <-stopCh:
|
||||
klog.Info("Shutting down network policies full sync goroutine")
|
||||
return
|
||||
default:
|
||||
}
|
||||
select {
|
||||
case <-stopCh:
|
||||
klog.Info("Shutting down network policies full sync goroutine")
|
||||
return
|
||||
case <-fullSyncRequest:
|
||||
klog.V(3).Info("Received request for a full sync, processing")
|
||||
npc.fullPolicySync() // fullPolicySync() is a blocking request here
|
||||
}
|
||||
}
|
||||
}(npc.fullSyncRequestChan, stopCh)
|
||||
|
||||
// loop forever till notified to stop on stopCh
|
||||
for {
|
||||
klog.V(1).Info("Requesting periodic sync of iptables to reflect network policies")
|
||||
npc.RequestFullSync()
|
||||
select {
|
||||
case <-stopCh:
|
||||
klog.Infof("Shutting down network policies controller")
|
||||
return
|
||||
case <-t.C:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// RequestFullSync allows the request of a full network policy sync without blocking the callee
|
||||
func (npc *NetworkPolicyController) RequestFullSync() {
|
||||
select {
|
||||
case npc.fullSyncRequestChan <- struct{}{}:
|
||||
klog.V(3).Info("Full sync request queue was empty so a full sync request was successfully sent")
|
||||
default: // Don't block if the buffered channel is full, return quickly so that we don't block callee execution
|
||||
klog.V(1).Info("Full sync request queue was full, skipping...")
|
||||
}
|
||||
}
|
||||
|
||||
// Sync synchronizes iptables to desired state of network policies
|
||||
func (npc *NetworkPolicyController) fullPolicySync() {
|
||||
|
||||
var err error
|
||||
var networkPoliciesInfo []networkPolicyInfo
|
||||
npc.mu.Lock()
|
||||
defer npc.mu.Unlock()
|
||||
|
||||
start := time.Now()
|
||||
syncVersion := strconv.FormatInt(start.UnixNano(), 10)
|
||||
defer func() {
|
||||
endTime := time.Since(start)
|
||||
klog.V(1).Infof("sync iptables took %v", endTime)
|
||||
}()
|
||||
|
||||
klog.V(1).Infof("Starting sync of iptables with version: %s", syncVersion)
|
||||
|
||||
// ensure kube-router specific top level chains and corresponding rules exist
|
||||
npc.ensureTopLevelChains()
|
||||
|
||||
// ensure default network policy chain that is applied to traffic from/to the pods that does not match any network policy
|
||||
npc.ensureDefaultNetworkPolicyChain()
|
||||
|
||||
networkPoliciesInfo, err = npc.buildNetworkPoliciesInfo()
|
||||
if err != nil {
|
||||
klog.Errorf("Aborting sync. Failed to build network policies: %v", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
npc.filterTableRules.Reset()
|
||||
if err := utils.SaveInto("filter", &npc.filterTableRules); err != nil {
|
||||
klog.Errorf("Aborting sync. Failed to run iptables-save: %v" + err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
activePolicyChains, activePolicyIPSets, err := npc.syncNetworkPolicyChains(networkPoliciesInfo, syncVersion)
|
||||
if err != nil {
|
||||
klog.Errorf("Aborting sync. Failed to sync network policy chains: %v" + err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
activePodFwChains, err := npc.syncPodFirewallChains(networkPoliciesInfo, syncVersion)
|
||||
if err != nil {
|
||||
klog.Errorf("Aborting sync. Failed to sync pod firewalls: %v", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
err = npc.cleanupStaleRules(activePolicyChains, activePodFwChains)
|
||||
if err != nil {
|
||||
klog.Errorf("Aborting sync. Failed to cleanup stale iptables rules: %v", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if err := utils.Restore("filter", npc.filterTableRules.Bytes()); err != nil {
|
||||
klog.Errorf("Aborting sync. Failed to run iptables-restore: %v\n%s", err.Error(), npc.filterTableRules.String())
|
||||
return
|
||||
}
|
||||
|
||||
err = npc.cleanupStaleIPSets(activePolicyIPSets)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to cleanup stale ipsets: %v", err.Error())
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Creates custom chains KUBE-ROUTER-INPUT, KUBE-ROUTER-FORWARD, KUBE-ROUTER-OUTPUT
|
||||
// and following rules in the filter table to jump from builtin chain to custom chain
|
||||
// -A INPUT -m comment --comment "kube-router netpol" -j KUBE-ROUTER-INPUT
|
||||
// -A FORWARD -m comment --comment "kube-router netpol" -j KUBE-ROUTER-FORWARD
|
||||
// -A OUTPUT -m comment --comment "kube-router netpol" -j KUBE-ROUTER-OUTPUT
|
||||
func (npc *NetworkPolicyController) ensureTopLevelChains() {
|
||||
|
||||
iptablesCmdHandler, err := iptables.New()
|
||||
if err != nil {
|
||||
klog.Fatalf("Failed to initialize iptables executor due to %s", err.Error())
|
||||
}
|
||||
|
||||
addUUIDForRuleSpec := func(chain string, ruleSpec *[]string) (string, error) {
|
||||
hash := sha256.Sum256([]byte(chain + strings.Join(*ruleSpec, "")))
|
||||
encoded := base32.StdEncoding.EncodeToString(hash[:])[:16]
|
||||
for idx, part := range *ruleSpec {
|
||||
if "--comment" == part {
|
||||
(*ruleSpec)[idx+1] = (*ruleSpec)[idx+1] + " - " + encoded
|
||||
return encoded, nil
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("could not find a comment in the ruleSpec string given: %s", strings.Join(*ruleSpec, " "))
|
||||
}
|
||||
|
||||
ensureRuleAtPosition := func(chain string, ruleSpec []string, uuid string, position int) {
|
||||
exists, err := iptablesCmdHandler.Exists("filter", chain, ruleSpec...)
|
||||
if err != nil {
|
||||
klog.Fatalf("Failed to verify rule exists in %s chain due to %s", chain, err.Error())
|
||||
}
|
||||
if !exists {
|
||||
err := iptablesCmdHandler.Insert("filter", chain, position, ruleSpec...)
|
||||
if err != nil {
|
||||
klog.Fatalf("Failed to run iptables command to insert in %s chain %s", chain, err.Error())
|
||||
}
|
||||
return
|
||||
}
|
||||
rules, err := iptablesCmdHandler.List("filter", chain)
|
||||
if err != nil {
|
||||
klog.Fatalf("failed to list rules in filter table %s chain due to %s", chain, err.Error())
|
||||
}
|
||||
|
||||
var ruleNo, ruleIndexOffset int
|
||||
for i, rule := range rules {
|
||||
rule = strings.Replace(rule, "\"", "", 2) //removes quote from comment string
|
||||
if strings.HasPrefix(rule, "-P") || strings.HasPrefix(rule, "-N") {
|
||||
// if this chain has a default policy, then it will show as rule #1 from iptablesCmdHandler.List so we
|
||||
// need to account for this offset
|
||||
ruleIndexOffset++
|
||||
continue
|
||||
}
|
||||
if strings.Contains(rule, uuid) {
|
||||
// range uses a 0 index, but iptables uses a 1 index so we need to increase ruleNo by 1
|
||||
ruleNo = i + 1 - ruleIndexOffset
|
||||
break
|
||||
}
|
||||
}
|
||||
if ruleNo != position {
|
||||
err = iptablesCmdHandler.Insert("filter", chain, position, ruleSpec...)
|
||||
if err != nil {
|
||||
klog.Fatalf("Failed to run iptables command to insert in %s chain %s", chain, err.Error())
|
||||
}
|
||||
err = iptablesCmdHandler.Delete("filter", chain, strconv.Itoa(ruleNo+1))
|
||||
if err != nil {
|
||||
klog.Fatalf("Failed to delete incorrect rule in %s chain due to %s", chain, err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
chains := map[string]string{"INPUT": kubeInputChainName, "FORWARD": kubeForwardChainName, "OUTPUT": kubeOutputChainName}
|
||||
|
||||
for builtinChain, customChain := range chains {
|
||||
err = iptablesCmdHandler.NewChain("filter", customChain)
|
||||
if err != nil && err.(*iptables.Error).ExitStatus() != 1 {
|
||||
klog.Fatalf("Failed to run iptables command to create %s chain due to %s", customChain, err.Error())
|
||||
}
|
||||
args := []string{"-m", "comment", "--comment", "kube-router netpol", "-j", customChain}
|
||||
uuid, err := addUUIDForRuleSpec(builtinChain, &args)
|
||||
if err != nil {
|
||||
klog.Fatalf("Failed to get uuid for rule: %s", err.Error())
|
||||
}
|
||||
ensureRuleAtPosition(builtinChain, args, uuid, 1)
|
||||
}
|
||||
|
||||
whitelistServiceVips := []string{"-m", "comment", "--comment", "allow traffic to cluster IP", "-d", npc.serviceClusterIPRange.String(), "-j", "RETURN"}
|
||||
uuid, err := addUUIDForRuleSpec(kubeInputChainName, &whitelistServiceVips)
|
||||
if err != nil {
|
||||
klog.Fatalf("Failed to get uuid for rule: %s", err.Error())
|
||||
}
|
||||
ensureRuleAtPosition(kubeInputChainName, whitelistServiceVips, uuid, 1)
|
||||
|
||||
whitelistTCPNodeports := []string{"-p", "tcp", "-m", "comment", "--comment", "allow LOCAL TCP traffic to node ports", "-m", "addrtype", "--dst-type", "LOCAL",
|
||||
"-m", "multiport", "--dports", npc.serviceNodePortRange, "-j", "RETURN"}
|
||||
uuid, err = addUUIDForRuleSpec(kubeInputChainName, &whitelistTCPNodeports)
|
||||
if err != nil {
|
||||
klog.Fatalf("Failed to get uuid for rule: %s", err.Error())
|
||||
}
|
||||
ensureRuleAtPosition(kubeInputChainName, whitelistTCPNodeports, uuid, 2)
|
||||
|
||||
whitelistUDPNodeports := []string{"-p", "udp", "-m", "comment", "--comment", "allow LOCAL UDP traffic to node ports", "-m", "addrtype", "--dst-type", "LOCAL",
|
||||
"-m", "multiport", "--dports", npc.serviceNodePortRange, "-j", "RETURN"}
|
||||
uuid, err = addUUIDForRuleSpec(kubeInputChainName, &whitelistUDPNodeports)
|
||||
if err != nil {
|
||||
klog.Fatalf("Failed to get uuid for rule: %s", err.Error())
|
||||
}
|
||||
ensureRuleAtPosition(kubeInputChainName, whitelistUDPNodeports, uuid, 3)
|
||||
|
||||
for externalIPIndex, externalIPRange := range npc.serviceExternalIPRanges {
|
||||
whitelistServiceVips := []string{"-m", "comment", "--comment", "allow traffic to external IP range: " + externalIPRange.String(), "-d", externalIPRange.String(), "-j", "RETURN"}
|
||||
uuid, err = addUUIDForRuleSpec(kubeInputChainName, &whitelistServiceVips)
|
||||
if err != nil {
|
||||
klog.Fatalf("Failed to get uuid for rule: %s", err.Error())
|
||||
}
|
||||
ensureRuleAtPosition(kubeInputChainName, whitelistServiceVips, uuid, externalIPIndex+4)
|
||||
}
|
||||
|
||||
// for the traffic to/from the local pod's let network policy controller be
|
||||
// authoritative entity to ACCEPT the traffic if it complies to network policies
|
||||
for _, chain := range chains {
|
||||
comment := "rule to explicitly ACCEPT traffic that comply to network policies"
|
||||
args := []string{"-m", "comment", "--comment", comment, "-m", "mark", "--mark", "0x20000/0x20000", "-j", "ACCEPT"}
|
||||
err = iptablesCmdHandler.AppendUnique("filter", chain, args...)
|
||||
if err != nil {
|
||||
klog.Fatalf("Failed to run iptables command: %s", err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Creates custom chains KUBE-NWPLCY-DEFAULT
|
||||
func (npc *NetworkPolicyController) ensureDefaultNetworkPolicyChain() {
|
||||
|
||||
iptablesCmdHandler, err := iptables.New()
|
||||
if err != nil {
|
||||
klog.Fatalf("Failed to initialize iptables executor due to %s", err.Error())
|
||||
}
|
||||
|
||||
markArgs := make([]string, 0)
|
||||
markComment := "rule to mark traffic matching a network policy"
|
||||
markArgs = append(markArgs, "-j", "MARK", "-m", "comment", "--comment", markComment, "--set-xmark", "0x10000/0x10000")
|
||||
|
||||
err = iptablesCmdHandler.NewChain("filter", kubeDefaultNetpolChain)
|
||||
if err != nil && err.(*iptables.Error).ExitStatus() != 1 {
|
||||
klog.Fatalf("Failed to run iptables command to create %s chain due to %s", kubeDefaultNetpolChain, err.Error())
|
||||
}
|
||||
err = iptablesCmdHandler.AppendUnique("filter", kubeDefaultNetpolChain, markArgs...)
|
||||
if err != nil {
|
||||
klog.Fatalf("Failed to run iptables command: %s", err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func (npc *NetworkPolicyController) cleanupStaleRules(activePolicyChains, activePodFwChains map[string]bool) error {
|
||||
|
||||
cleanupPodFwChains := make([]string, 0)
|
||||
cleanupPolicyChains := make([]string, 0)
|
||||
|
||||
// initialize tool sets for working with iptables and ipset
|
||||
iptablesCmdHandler, err := iptables.New()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to initialize iptables command executor due to %s", err.Error())
|
||||
}
|
||||
|
||||
// find iptables chains and ipsets that are no longer used by comparing current to the active maps we were passed
|
||||
chains, err := iptablesCmdHandler.ListChains("filter")
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list chains: %s", err)
|
||||
}
|
||||
for _, chain := range chains {
|
||||
if strings.HasPrefix(chain, kubeNetworkPolicyChainPrefix) {
|
||||
if chain == kubeDefaultNetpolChain {
|
||||
continue
|
||||
}
|
||||
if _, ok := activePolicyChains[chain]; !ok {
|
||||
cleanupPolicyChains = append(cleanupPolicyChains, chain)
|
||||
}
|
||||
}
|
||||
if strings.HasPrefix(chain, kubePodFirewallChainPrefix) {
|
||||
if _, ok := activePodFwChains[chain]; !ok {
|
||||
cleanupPodFwChains = append(cleanupPodFwChains, chain)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var newChains, newRules, desiredFilterTable bytes.Buffer
|
||||
rules := strings.Split(npc.filterTableRules.String(), "\n")
|
||||
if len(rules) > 0 && rules[len(rules)-1] == "" {
|
||||
rules = rules[:len(rules)-1]
|
||||
}
|
||||
for _, rule := range rules {
|
||||
skipRule := false
|
||||
for _, podFWChainName := range cleanupPodFwChains {
|
||||
if strings.Contains(rule, podFWChainName) {
|
||||
skipRule = true
|
||||
break
|
||||
}
|
||||
}
|
||||
for _, policyChainName := range cleanupPolicyChains {
|
||||
if strings.Contains(rule, policyChainName) {
|
||||
skipRule = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if strings.Contains(rule, "COMMIT") || strings.HasPrefix(rule, "# ") {
|
||||
skipRule = true
|
||||
}
|
||||
if skipRule {
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(rule, ":") {
|
||||
newChains.WriteString(rule + " - [0:0]\n")
|
||||
}
|
||||
if strings.HasPrefix(rule, "-") {
|
||||
newRules.WriteString(rule + "\n")
|
||||
}
|
||||
}
|
||||
desiredFilterTable.WriteString("*filter" + "\n")
|
||||
desiredFilterTable.Write(newChains.Bytes())
|
||||
desiredFilterTable.Write(newRules.Bytes())
|
||||
desiredFilterTable.WriteString("COMMIT" + "\n")
|
||||
npc.filterTableRules = desiredFilterTable
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (npc *NetworkPolicyController) cleanupStaleIPSets(activePolicyIPSets map[string]bool) error {
|
||||
cleanupPolicyIPSets := make([]*utils.Set, 0)
|
||||
ipsets, err := utils.NewIPSet(false)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create ipsets command executor due to %s", err.Error())
|
||||
}
|
||||
err = ipsets.Save()
|
||||
if err != nil {
|
||||
klog.Fatalf("failed to initialize ipsets command executor due to %s", err.Error())
|
||||
}
|
||||
for _, set := range ipsets.Sets {
|
||||
if strings.HasPrefix(set.Name, kubeSourceIPSetPrefix) ||
|
||||
strings.HasPrefix(set.Name, kubeDestinationIPSetPrefix) {
|
||||
if _, ok := activePolicyIPSets[set.Name]; !ok {
|
||||
cleanupPolicyIPSets = append(cleanupPolicyIPSets, set)
|
||||
}
|
||||
}
|
||||
}
|
||||
// cleanup network policy ipsets
|
||||
for _, set := range cleanupPolicyIPSets {
|
||||
err = set.Destroy()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete ipset %s due to %s", set.Name, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Cleanup cleanup configurations done
|
||||
func (npc *NetworkPolicyController) Cleanup() {
|
||||
|
||||
klog.Info("Cleaning up iptables configuration permanently done by kube-router")
|
||||
|
||||
iptablesCmdHandler, err := iptables.New()
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to initialize iptables executor: %s", err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
// delete jump rules in FORWARD chain to pod specific firewall chain
|
||||
forwardChainRules, err := iptablesCmdHandler.List("filter", kubeForwardChainName)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to delete iptables rules as part of cleanup")
|
||||
return
|
||||
}
|
||||
|
||||
// TODO: need a better way to delete rule with out using number
|
||||
var realRuleNo int
|
||||
for i, rule := range forwardChainRules {
|
||||
if strings.Contains(rule, kubePodFirewallChainPrefix) {
|
||||
err = iptablesCmdHandler.Delete("filter", kubeForwardChainName, strconv.Itoa(i-realRuleNo))
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to delete iptables rule as part of cleanup: %s", err)
|
||||
}
|
||||
realRuleNo++
|
||||
}
|
||||
}
|
||||
|
||||
// delete jump rules in OUTPUT chain to pod specific firewall chain
|
||||
forwardChainRules, err = iptablesCmdHandler.List("filter", kubeOutputChainName)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to delete iptables rules as part of cleanup")
|
||||
return
|
||||
}
|
||||
|
||||
// TODO: need a better way to delete rule with out using number
|
||||
realRuleNo = 0
|
||||
for i, rule := range forwardChainRules {
|
||||
if strings.Contains(rule, kubePodFirewallChainPrefix) {
|
||||
err = iptablesCmdHandler.Delete("filter", kubeOutputChainName, strconv.Itoa(i-realRuleNo))
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to delete iptables rule as part of cleanup: %s", err)
|
||||
}
|
||||
realRuleNo++
|
||||
}
|
||||
}
|
||||
|
||||
// flush and delete pod specific firewall chain
|
||||
chains, err := iptablesCmdHandler.ListChains("filter")
|
||||
if err != nil {
|
||||
klog.Errorf("Unable to list chains: %s", err)
|
||||
return
|
||||
}
|
||||
for _, chain := range chains {
|
||||
if strings.HasPrefix(chain, kubePodFirewallChainPrefix) {
|
||||
err = iptablesCmdHandler.ClearChain("filter", chain)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to cleanup iptables rules: " + err.Error())
|
||||
return
|
||||
}
|
||||
err = iptablesCmdHandler.DeleteChain("filter", chain)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to cleanup iptables rules: " + err.Error())
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// flush and delete per network policy specific chain
|
||||
chains, err = iptablesCmdHandler.ListChains("filter")
|
||||
if err != nil {
|
||||
klog.Errorf("Unable to list chains: %s", err)
|
||||
return
|
||||
}
|
||||
for _, chain := range chains {
|
||||
if strings.HasPrefix(chain, kubeNetworkPolicyChainPrefix) {
|
||||
err = iptablesCmdHandler.ClearChain("filter", chain)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to cleanup iptables rules: " + err.Error())
|
||||
return
|
||||
}
|
||||
err = iptablesCmdHandler.DeleteChain("filter", chain)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to cleanup iptables rules: " + err.Error())
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// delete all ipsets
|
||||
klog.V(1).Infof("Attempting to attain ipset mutex lock")
|
||||
npc.ipsetMutex.Lock()
|
||||
klog.V(1).Infof("Attained ipset mutex lock, continuing...")
|
||||
defer func() {
|
||||
npc.ipsetMutex.Unlock()
|
||||
klog.V(1).Infof("Returned ipset mutex lock")
|
||||
}()
|
||||
ipset, err := utils.NewIPSet(false)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to clean up ipsets: " + err.Error())
|
||||
return
|
||||
}
|
||||
err = ipset.Save()
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to clean up ipsets: " + err.Error())
|
||||
}
|
||||
err = ipset.DestroyAllWithin()
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to clean up ipsets: " + err.Error())
|
||||
}
|
||||
klog.Infof("Successfully cleaned the iptables configuration done by kube-router")
|
||||
}
|
||||
|
||||
// NewNetworkPolicyController returns new NetworkPolicyController object
|
||||
func NewNetworkPolicyController(clientset kubernetes.Interface,
|
||||
config *config.Node, podInformer cache.SharedIndexInformer,
|
||||
npInformer cache.SharedIndexInformer, nsInformer cache.SharedIndexInformer, ipsetMutex *sync.Mutex) (*NetworkPolicyController, error) {
|
||||
npc := NetworkPolicyController{ipsetMutex: ipsetMutex}
|
||||
|
||||
// Creating a single-item buffered channel to ensure that we only keep a single full sync request at a time,
|
||||
// additional requests would be pointless to queue since after the first one was processed the system would already
|
||||
// be up to date with all of the policy changes from any enqueued request after that
|
||||
npc.fullSyncRequestChan = make(chan struct{}, 1)
|
||||
|
||||
npc.serviceClusterIPRange = *config.AgentConfig.ServiceCIDR
|
||||
npc.serviceNodePortRange = strings.ReplaceAll(config.AgentConfig.ServiceNodePortRange.String(), "-", ":")
|
||||
npc.syncPeriod = defaultSyncPeriod
|
||||
|
||||
node, err := utils.GetNodeObject(clientset, config.AgentConfig.NodeName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
nodeIP, err := utils.GetNodeIP(node)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
npc.nodeIP = nodeIP
|
||||
|
||||
npc.podLister = podInformer.GetIndexer()
|
||||
npc.PodEventHandler = npc.newPodEventHandler()
|
||||
|
||||
npc.nsLister = nsInformer.GetIndexer()
|
||||
npc.NamespaceEventHandler = npc.newNamespaceEventHandler()
|
||||
|
||||
npc.npLister = npInformer.GetIndexer()
|
||||
npc.NetworkPolicyEventHandler = npc.newNetworkPolicyEventHandler()
|
||||
|
||||
return &npc, nil
|
||||
}
|
@ -1,669 +0,0 @@
|
||||
// Apache License v2.0 (copyright Cloud Native Labs & Rancher Labs)
|
||||
// - modified from https://github.com/cloudnativelabs/kube-router/blob/73b1b03b32c5755b240f6c077bb097abe3888314/pkg/controllers/network_policy_controller_test.go
|
||||
|
||||
// +build !windows
|
||||
|
||||
package netpol
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/rancher/k3s/pkg/daemons/config"
|
||||
netv1 "k8s.io/api/networking/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
utilnet "k8s.io/apimachinery/pkg/util/net"
|
||||
"k8s.io/client-go/informers"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
)
|
||||
|
||||
// newFakeInformersFromClient creates the different informers used in the uneventful network policy controller
|
||||
func newFakeInformersFromClient(kubeClient clientset.Interface) (informers.SharedInformerFactory, cache.SharedIndexInformer, cache.SharedIndexInformer, cache.SharedIndexInformer) {
|
||||
informerFactory := informers.NewSharedInformerFactory(kubeClient, 0)
|
||||
podInformer := informerFactory.Core().V1().Pods().Informer()
|
||||
npInformer := informerFactory.Networking().V1().NetworkPolicies().Informer()
|
||||
nsInformer := informerFactory.Core().V1().Namespaces().Informer()
|
||||
return informerFactory, podInformer, nsInformer, npInformer
|
||||
}
|
||||
|
||||
type tNamespaceMeta struct {
|
||||
name string
|
||||
labels labels.Set
|
||||
}
|
||||
|
||||
// Add resources to Informer Store object to simulate updating the Informer
|
||||
func tAddToInformerStore(t *testing.T, informer cache.SharedIndexInformer, obj interface{}) {
|
||||
err := informer.GetStore().Add(obj)
|
||||
if err != nil {
|
||||
t.Fatalf("error injecting object to Informer Store: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
type tNetpol struct {
|
||||
name string
|
||||
namespace string
|
||||
podSelector metav1.LabelSelector
|
||||
ingress []netv1.NetworkPolicyIngressRule
|
||||
egress []netv1.NetworkPolicyEgressRule
|
||||
}
|
||||
|
||||
// createFakeNetpol is a helper to create the network policy from the tNetpol struct
|
||||
func (ns *tNetpol) createFakeNetpol(t *testing.T, informer cache.SharedIndexInformer) {
|
||||
polTypes := make([]netv1.PolicyType, 0)
|
||||
if len(ns.ingress) != 0 {
|
||||
polTypes = append(polTypes, netv1.PolicyTypeIngress)
|
||||
}
|
||||
if len(ns.egress) != 0 {
|
||||
polTypes = append(polTypes, netv1.PolicyTypeEgress)
|
||||
}
|
||||
tAddToInformerStore(t, informer,
|
||||
&netv1.NetworkPolicy{ObjectMeta: metav1.ObjectMeta{Name: ns.name, Namespace: ns.namespace},
|
||||
Spec: netv1.NetworkPolicySpec{
|
||||
PodSelector: ns.podSelector,
|
||||
PolicyTypes: polTypes,
|
||||
Ingress: ns.ingress,
|
||||
Egress: ns.egress,
|
||||
}})
|
||||
}
|
||||
|
||||
func (ns *tNetpol) findNetpolMatch(netpols *[]networkPolicyInfo) *networkPolicyInfo {
|
||||
for _, netpol := range *netpols {
|
||||
if netpol.namespace == ns.namespace && netpol.name == ns.name {
|
||||
return &netpol
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// tPodNamespaceMap is a helper to create sets of namespace,pod names
|
||||
type tPodNamespaceMap map[string]map[string]bool
|
||||
|
||||
func (t tPodNamespaceMap) addPod(pod podInfo) {
|
||||
if _, ok := t[pod.namespace]; !ok {
|
||||
t[pod.namespace] = make(map[string]bool)
|
||||
}
|
||||
t[pod.namespace][pod.name] = true
|
||||
}
|
||||
func (t tPodNamespaceMap) delPod(pod podInfo) {
|
||||
delete(t[pod.namespace], pod.name)
|
||||
if len(t[pod.namespace]) == 0 {
|
||||
delete(t, pod.namespace)
|
||||
}
|
||||
}
|
||||
func (t tPodNamespaceMap) addNSPodInfo(ns, podname string) {
|
||||
if _, ok := t[ns]; !ok {
|
||||
t[ns] = make(map[string]bool)
|
||||
}
|
||||
t[ns][podname] = true
|
||||
}
|
||||
func (t tPodNamespaceMap) copy() tPodNamespaceMap {
|
||||
newMap := make(tPodNamespaceMap)
|
||||
for ns, pods := range t {
|
||||
for p := range pods {
|
||||
newMap.addNSPodInfo(ns, p)
|
||||
}
|
||||
}
|
||||
return newMap
|
||||
}
|
||||
func (t tPodNamespaceMap) toStrSlice() (r []string) {
|
||||
for ns, pods := range t {
|
||||
for pod := range pods {
|
||||
r = append(r, ns+":"+pod)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// tNewPodNamespaceMapFromTC creates a new tPodNamespaceMap from the info of the testcase
|
||||
func tNewPodNamespaceMapFromTC(target map[string]string) tPodNamespaceMap {
|
||||
newMap := make(tPodNamespaceMap)
|
||||
for ns, pods := range target {
|
||||
for _, pod := range strings.Split(pods, ",") {
|
||||
newMap.addNSPodInfo(ns, pod)
|
||||
}
|
||||
}
|
||||
return newMap
|
||||
}
|
||||
|
||||
// tCreateFakePods creates the Pods and Namespaces that will be affected by the network policies
|
||||
// returns a map like map[Namespace]map[PodName]bool
|
||||
func tCreateFakePods(t *testing.T, podInformer cache.SharedIndexInformer, nsInformer cache.SharedIndexInformer) tPodNamespaceMap {
|
||||
podNamespaceMap := make(tPodNamespaceMap)
|
||||
pods := []podInfo{
|
||||
{name: "Aa", labels: labels.Set{"app": "a"}, namespace: "nsA", ip: "1.1"},
|
||||
{name: "Aaa", labels: labels.Set{"app": "a", "component": "a"}, namespace: "nsA", ip: "1.2"},
|
||||
{name: "Aab", labels: labels.Set{"app": "a", "component": "b"}, namespace: "nsA", ip: "1.3"},
|
||||
{name: "Aac", labels: labels.Set{"app": "a", "component": "c"}, namespace: "nsA", ip: "1.4"},
|
||||
{name: "Ba", labels: labels.Set{"app": "a"}, namespace: "nsB", ip: "2.1"},
|
||||
{name: "Baa", labels: labels.Set{"app": "a", "component": "a"}, namespace: "nsB", ip: "2.2"},
|
||||
{name: "Bab", labels: labels.Set{"app": "a", "component": "b"}, namespace: "nsB", ip: "2.3"},
|
||||
{name: "Ca", labels: labels.Set{"app": "a"}, namespace: "nsC", ip: "3.1"},
|
||||
}
|
||||
namespaces := []tNamespaceMeta{
|
||||
{name: "nsA", labels: labels.Set{"name": "a", "team": "a"}},
|
||||
{name: "nsB", labels: labels.Set{"name": "b", "team": "a"}},
|
||||
{name: "nsC", labels: labels.Set{"name": "c"}},
|
||||
{name: "nsD", labels: labels.Set{"name": "d"}},
|
||||
}
|
||||
ipsUsed := make(map[string]bool)
|
||||
for _, pod := range pods {
|
||||
podNamespaceMap.addPod(pod)
|
||||
ipaddr := "1.1." + pod.ip
|
||||
if ipsUsed[ipaddr] {
|
||||
t.Fatalf("there is another pod with the same Ip address %s as this pod %s namespace %s",
|
||||
ipaddr, pod.name, pod.namespace)
|
||||
}
|
||||
ipsUsed[ipaddr] = true
|
||||
tAddToInformerStore(t, podInformer,
|
||||
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: pod.name, Labels: pod.labels, Namespace: pod.namespace},
|
||||
Status: v1.PodStatus{PodIP: ipaddr}})
|
||||
}
|
||||
for _, ns := range namespaces {
|
||||
tAddToInformerStore(t, nsInformer, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns.name, Labels: ns.labels}})
|
||||
}
|
||||
return podNamespaceMap
|
||||
}
|
||||
|
||||
// newFakeNode is a helper function for creating Nodes for testing.
|
||||
func newFakeNode(name string, addr string) *v1.Node {
|
||||
return &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: name},
|
||||
Status: v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceName(v1.ResourceCPU): resource.MustParse("1"),
|
||||
v1.ResourceName(v1.ResourceMemory): resource.MustParse("1G"),
|
||||
},
|
||||
Addresses: []v1.NodeAddress{{Type: v1.NodeExternalIP, Address: addr}},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// newUneventfulNetworkPolicyController returns a new NetworkPolicyController object without any event handlers
|
||||
func newUneventfulNetworkPolicyController(podInformer cache.SharedIndexInformer,
|
||||
npInformer cache.SharedIndexInformer, nsInformer cache.SharedIndexInformer) (*NetworkPolicyController, error) {
|
||||
|
||||
npc := NetworkPolicyController{}
|
||||
npc.syncPeriod = time.Hour
|
||||
|
||||
npc.nodeHostName = "node"
|
||||
npc.nodeIP = net.IPv4(10, 10, 10, 10)
|
||||
npc.podLister = podInformer.GetIndexer()
|
||||
npc.nsLister = nsInformer.GetIndexer()
|
||||
npc.npLister = npInformer.GetIndexer()
|
||||
|
||||
return &npc, nil
|
||||
}
|
||||
|
||||
// tNetpolTestCase helper struct to define the inputs to the test case (netpols) and
|
||||
// the expected selected targets (targetPods, inSourcePods for ingress targets, and outDestPods
|
||||
// for egress targets) as maps with key being the namespace and a csv of pod names
|
||||
type tNetpolTestCase struct {
|
||||
name string
|
||||
netpol tNetpol
|
||||
targetPods tPodNamespaceMap
|
||||
inSourcePods tPodNamespaceMap
|
||||
outDestPods tPodNamespaceMap
|
||||
expectedRule string
|
||||
}
|
||||
|
||||
// tGetNotTargetedPods finds set of pods that should not be targeted by netpol selectors
|
||||
func tGetNotTargetedPods(podsGot []podInfo, wanted tPodNamespaceMap) []string {
|
||||
unwanted := make(tPodNamespaceMap)
|
||||
for _, pod := range podsGot {
|
||||
if !wanted[pod.namespace][pod.name] {
|
||||
unwanted.addPod(pod)
|
||||
}
|
||||
}
|
||||
return unwanted.toStrSlice()
|
||||
}
|
||||
|
||||
// tGetTargetPodsMissing returns the set of pods that should have been targeted but were missing by netpol selectors
|
||||
func tGetTargetPodsMissing(podsGot []podInfo, wanted tPodNamespaceMap) []string {
|
||||
missing := wanted.copy()
|
||||
for _, pod := range podsGot {
|
||||
if wanted[pod.namespace][pod.name] {
|
||||
missing.delPod(pod)
|
||||
}
|
||||
}
|
||||
return missing.toStrSlice()
|
||||
}
|
||||
|
||||
func tListOfPodsFromTargets(target map[string]podInfo) (r []podInfo) {
|
||||
for _, pod := range target {
|
||||
r = append(r, pod)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func testForMissingOrUnwanted(t *testing.T, targetMsg string, got []podInfo, wanted tPodNamespaceMap) {
|
||||
if missing := tGetTargetPodsMissing(got, wanted); len(missing) != 0 {
|
||||
t.Errorf("Some Pods were not selected %s: %s", targetMsg, strings.Join(missing, ", "))
|
||||
}
|
||||
if missing := tGetNotTargetedPods(got, wanted); len(missing) != 0 {
|
||||
t.Errorf("Some Pods NOT expected were selected on %s: %s", targetMsg, strings.Join(missing, ", "))
|
||||
}
|
||||
}
|
||||
|
||||
func newMinimalNodeConfig(serviceIPCIDR string, nodePortRange string, hostNameOverride string, externalIPs []string) *config.Node {
|
||||
nodeConfig := &config.Node{AgentConfig: config.Agent{}}
|
||||
|
||||
if serviceIPCIDR != "" {
|
||||
_, cidr, err := net.ParseCIDR(serviceIPCIDR)
|
||||
if err != nil {
|
||||
panic("failed to get parse --service-cluster-ip-range parameter: " + err.Error())
|
||||
}
|
||||
nodeConfig.AgentConfig.ServiceCIDR = cidr
|
||||
} else {
|
||||
nodeConfig.AgentConfig.ServiceCIDR = &net.IPNet{}
|
||||
}
|
||||
if nodePortRange != "" {
|
||||
portRange, err := utilnet.ParsePortRange(nodePortRange)
|
||||
if err != nil {
|
||||
panic("failed to get parse --service-node-port-range:" + err.Error())
|
||||
}
|
||||
nodeConfig.AgentConfig.ServiceNodePortRange = *portRange
|
||||
}
|
||||
if hostNameOverride != "" {
|
||||
nodeConfig.AgentConfig.NodeName = hostNameOverride
|
||||
}
|
||||
if externalIPs != nil {
|
||||
// TODO: We don't currently have a way to set these through the K3s CLI; if we ever do then test that here.
|
||||
for _, cidr := range externalIPs {
|
||||
if _, _, err := net.ParseCIDR(cidr); err != nil {
|
||||
panic("failed to get parse --service-external-ip-range parameter: " + err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
return nodeConfig
|
||||
}
|
||||
|
||||
type tNetPolConfigTestCase struct {
|
||||
name string
|
||||
config *config.Node
|
||||
expectError bool
|
||||
errorText string
|
||||
}
|
||||
|
||||
func Test_UnitNewNetworkPolicySelectors(t *testing.T) {
|
||||
testCases := []tNetpolTestCase{
|
||||
{
|
||||
name: "Non-Existent Namespace",
|
||||
netpol: tNetpol{name: "nsXX", podSelector: metav1.LabelSelector{}, namespace: "nsXX"},
|
||||
targetPods: nil,
|
||||
},
|
||||
{
|
||||
name: "Empty Namespace",
|
||||
netpol: tNetpol{name: "nsD", podSelector: metav1.LabelSelector{}, namespace: "nsD"},
|
||||
targetPods: nil,
|
||||
},
|
||||
{
|
||||
name: "All pods in nsA",
|
||||
netpol: tNetpol{name: "nsA", podSelector: metav1.LabelSelector{}, namespace: "nsA"},
|
||||
targetPods: tNewPodNamespaceMapFromTC(map[string]string{"nsA": "Aa,Aaa,Aab,Aac"}),
|
||||
},
|
||||
{
|
||||
name: "All pods in nsB",
|
||||
netpol: tNetpol{name: "nsB", podSelector: metav1.LabelSelector{}, namespace: "nsB"},
|
||||
targetPods: tNewPodNamespaceMapFromTC(map[string]string{"nsB": "Ba,Baa,Bab"}),
|
||||
},
|
||||
{
|
||||
name: "All pods in nsC",
|
||||
netpol: tNetpol{name: "nsC", podSelector: metav1.LabelSelector{}, namespace: "nsC"},
|
||||
targetPods: tNewPodNamespaceMapFromTC(map[string]string{"nsC": "Ca"}),
|
||||
},
|
||||
{
|
||||
name: "All pods app=a in nsA using matchExpressions",
|
||||
netpol: tNetpol{
|
||||
name: "nsA-app-a-matchExpression",
|
||||
namespace: "nsA",
|
||||
podSelector: metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{{
|
||||
Key: "app",
|
||||
Operator: "In",
|
||||
Values: []string{"a"},
|
||||
}}}},
|
||||
targetPods: tNewPodNamespaceMapFromTC(map[string]string{"nsA": "Aa,Aaa,Aab,Aac"}),
|
||||
},
|
||||
{
|
||||
name: "All pods app=a in nsA using matchLabels",
|
||||
netpol: tNetpol{name: "nsA-app-a-matchLabels", namespace: "nsA",
|
||||
podSelector: metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{"app": "a"}}},
|
||||
targetPods: tNewPodNamespaceMapFromTC(map[string]string{"nsA": "Aa,Aaa,Aab,Aac"}),
|
||||
},
|
||||
{
|
||||
name: "All pods app=a in nsA using matchLabels ingress allow from any pod in nsB",
|
||||
netpol: tNetpol{name: "nsA-app-a-matchLabels-2", namespace: "nsA",
|
||||
podSelector: metav1.LabelSelector{MatchLabels: map[string]string{"app": "a"}},
|
||||
ingress: []netv1.NetworkPolicyIngressRule{{From: []netv1.NetworkPolicyPeer{{NamespaceSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"name": "b"}}}}}},
|
||||
},
|
||||
targetPods: tNewPodNamespaceMapFromTC(map[string]string{"nsA": "Aa,Aaa,Aab,Aac"}),
|
||||
inSourcePods: tNewPodNamespaceMapFromTC(map[string]string{"nsB": "Ba,Baa,Bab"}),
|
||||
},
|
||||
{
|
||||
name: "All pods app=a in nsA using matchLabels ingress allow from pod in nsB with component = b",
|
||||
netpol: tNetpol{name: "nsA-app-a-matchExpression-2", namespace: "nsA",
|
||||
podSelector: metav1.LabelSelector{MatchLabels: map[string]string{"app": "a"}},
|
||||
ingress: []netv1.NetworkPolicyIngressRule{{From: []netv1.NetworkPolicyPeer{
|
||||
{
|
||||
NamespaceSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"name": "b"}},
|
||||
PodSelector: &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{{
|
||||
Key: "component",
|
||||
Operator: "In",
|
||||
Values: []string{"b"},
|
||||
}}},
|
||||
},
|
||||
}}}},
|
||||
targetPods: tNewPodNamespaceMapFromTC(map[string]string{"nsA": "Aa,Aaa,Aab,Aac"}),
|
||||
inSourcePods: tNewPodNamespaceMapFromTC(map[string]string{"nsB": "Bab"}),
|
||||
},
|
||||
{
|
||||
name: "All pods app=a,component=b or c in nsA",
|
||||
netpol: tNetpol{name: "nsA-app-a-matchExpression-3", namespace: "nsA",
|
||||
podSelector: metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "app",
|
||||
Operator: "In",
|
||||
Values: []string{"a"},
|
||||
},
|
||||
{
|
||||
Key: "component",
|
||||
Operator: "In",
|
||||
Values: []string{"b", "c"},
|
||||
}}},
|
||||
},
|
||||
targetPods: tNewPodNamespaceMapFromTC(map[string]string{"nsA": "Aab,Aac"}),
|
||||
},
|
||||
}
|
||||
|
||||
client := fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*newFakeNode("node", "10.10.10.10")}})
|
||||
informerFactory, podInformer, nsInformer, netpolInformer := newFakeInformersFromClient(client)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
informerFactory.Start(ctx.Done())
|
||||
cache.WaitForCacheSync(ctx.Done(), podInformer.HasSynced)
|
||||
krNetPol, _ := newUneventfulNetworkPolicyController(podInformer, netpolInformer, nsInformer)
|
||||
tCreateFakePods(t, podInformer, nsInformer)
|
||||
for _, test := range testCases {
|
||||
test.netpol.createFakeNetpol(t, netpolInformer)
|
||||
}
|
||||
netpols, err := krNetPol.buildNetworkPoliciesInfo()
|
||||
if err != nil {
|
||||
t.Errorf("Problems building policies")
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
np := test.netpol.findNetpolMatch(&netpols)
|
||||
testForMissingOrUnwanted(t, "targetPods", tListOfPodsFromTargets(np.targetPods), test.targetPods)
|
||||
for _, ingress := range np.ingressRules {
|
||||
testForMissingOrUnwanted(t, "ingress srcPods", ingress.srcPods, test.inSourcePods)
|
||||
}
|
||||
for _, egress := range np.egressRules {
|
||||
testForMissingOrUnwanted(t, "egress dstPods", egress.dstPods, test.outDestPods)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_UnitNetworkPolicyBuilder(t *testing.T) {
|
||||
port, port1 := intstr.FromInt(30000), intstr.FromInt(34000)
|
||||
ingressPort := intstr.FromInt(37000)
|
||||
endPort, endPort1 := int32(31000), int32(35000)
|
||||
testCases := []tNetpolTestCase{
|
||||
{
|
||||
name: "Simple Egress Destination Port",
|
||||
netpol: tNetpol{name: "simple-egress", namespace: "nsA",
|
||||
podSelector: metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "app",
|
||||
Operator: "In",
|
||||
Values: []string{"a"},
|
||||
},
|
||||
},
|
||||
},
|
||||
egress: []netv1.NetworkPolicyEgressRule{
|
||||
{
|
||||
Ports: []netv1.NetworkPolicyPort{
|
||||
{
|
||||
Port: &port,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedRule: "-A KUBE-NWPLCY-QHFGOTFJZFXUJVTH -m comment --comment \"rule to ACCEPT traffic from source pods to all destinations selected by policy name: simple-egress namespace nsA\" --dport 30000 -j MARK --set-xmark 0x10000/0x10000 \n" +
|
||||
"-A KUBE-NWPLCY-QHFGOTFJZFXUJVTH -m comment --comment \"rule to ACCEPT traffic from source pods to all destinations selected by policy name: simple-egress namespace nsA\" --dport 30000 -m mark --mark 0x10000/0x10000 -j RETURN \n",
|
||||
},
|
||||
{
|
||||
name: "Simple Ingress/Egress Destination Port",
|
||||
netpol: tNetpol{name: "simple-ingress-egress", namespace: "nsA",
|
||||
podSelector: metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "app",
|
||||
Operator: "In",
|
||||
Values: []string{"a"},
|
||||
},
|
||||
},
|
||||
},
|
||||
egress: []netv1.NetworkPolicyEgressRule{
|
||||
{
|
||||
Ports: []netv1.NetworkPolicyPort{
|
||||
{
|
||||
Port: &port,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
ingress: []netv1.NetworkPolicyIngressRule{
|
||||
{
|
||||
Ports: []netv1.NetworkPolicyPort{
|
||||
{
|
||||
Port: &ingressPort,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedRule: "-A KUBE-NWPLCY-KO52PWL34ABMMBI7 -m comment --comment \"rule to ACCEPT traffic from source pods to all destinations selected by policy name: simple-ingress-egress namespace nsA\" --dport 30000 -j MARK --set-xmark 0x10000/0x10000 \n" +
|
||||
"-A KUBE-NWPLCY-KO52PWL34ABMMBI7 -m comment --comment \"rule to ACCEPT traffic from source pods to all destinations selected by policy name: simple-ingress-egress namespace nsA\" --dport 30000 -m mark --mark 0x10000/0x10000 -j RETURN \n" +
|
||||
"-A KUBE-NWPLCY-KO52PWL34ABMMBI7 -m comment --comment \"rule to ACCEPT traffic from all sources to dest pods selected by policy name: simple-ingress-egress namespace nsA\" --dport 37000 -j MARK --set-xmark 0x10000/0x10000 \n" +
|
||||
"-A KUBE-NWPLCY-KO52PWL34ABMMBI7 -m comment --comment \"rule to ACCEPT traffic from all sources to dest pods selected by policy name: simple-ingress-egress namespace nsA\" --dport 37000 -m mark --mark 0x10000/0x10000 -j RETURN \n",
|
||||
},
|
||||
{
|
||||
name: "Simple Egress Destination Port Range",
|
||||
netpol: tNetpol{name: "simple-egress-pr", namespace: "nsA",
|
||||
podSelector: metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "app",
|
||||
Operator: "In",
|
||||
Values: []string{"a"},
|
||||
},
|
||||
},
|
||||
},
|
||||
egress: []netv1.NetworkPolicyEgressRule{
|
||||
{
|
||||
Ports: []netv1.NetworkPolicyPort{
|
||||
{
|
||||
Port: &port,
|
||||
EndPort: &endPort,
|
||||
},
|
||||
{
|
||||
Port: &port1,
|
||||
EndPort: &endPort1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedRule: "-A KUBE-NWPLCY-SQYQ7PVNG6A6Q3DU -m comment --comment \"rule to ACCEPT traffic from source pods to all destinations selected by policy name: simple-egress-pr namespace nsA\" --dport 30000:31000 -j MARK --set-xmark 0x10000/0x10000 \n" +
|
||||
"-A KUBE-NWPLCY-SQYQ7PVNG6A6Q3DU -m comment --comment \"rule to ACCEPT traffic from source pods to all destinations selected by policy name: simple-egress-pr namespace nsA\" --dport 30000:31000 -m mark --mark 0x10000/0x10000 -j RETURN \n" +
|
||||
"-A KUBE-NWPLCY-SQYQ7PVNG6A6Q3DU -m comment --comment \"rule to ACCEPT traffic from source pods to all destinations selected by policy name: simple-egress-pr namespace nsA\" --dport 34000:35000 -j MARK --set-xmark 0x10000/0x10000 \n" +
|
||||
"-A KUBE-NWPLCY-SQYQ7PVNG6A6Q3DU -m comment --comment \"rule to ACCEPT traffic from source pods to all destinations selected by policy name: simple-egress-pr namespace nsA\" --dport 34000:35000 -m mark --mark 0x10000/0x10000 -j RETURN \n",
|
||||
},
|
||||
{
|
||||
name: "Port > EndPort (invalid condition, should drop endport)",
|
||||
netpol: tNetpol{name: "invalid-endport", namespace: "nsA",
|
||||
podSelector: metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "app",
|
||||
Operator: "In",
|
||||
Values: []string{"a"},
|
||||
},
|
||||
},
|
||||
},
|
||||
egress: []netv1.NetworkPolicyEgressRule{
|
||||
{
|
||||
Ports: []netv1.NetworkPolicyPort{
|
||||
{
|
||||
Port: &port1,
|
||||
EndPort: &endPort,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedRule: "-A KUBE-NWPLCY-2A4DPWPR5REBS66I -m comment --comment \"rule to ACCEPT traffic from source pods to all destinations selected by policy name: invalid-endport namespace nsA\" --dport 34000 -j MARK --set-xmark 0x10000/0x10000 \n" +
|
||||
"-A KUBE-NWPLCY-2A4DPWPR5REBS66I -m comment --comment \"rule to ACCEPT traffic from source pods to all destinations selected by policy name: invalid-endport namespace nsA\" --dport 34000 -m mark --mark 0x10000/0x10000 -j RETURN \n",
|
||||
},
|
||||
}
|
||||
|
||||
client := fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*newFakeNode("node", "10.10.10.10")}})
|
||||
informerFactory, podInformer, nsInformer, netpolInformer := newFakeInformersFromClient(client)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
informerFactory.Start(ctx.Done())
|
||||
cache.WaitForCacheSync(ctx.Done(), podInformer.HasSynced)
|
||||
krNetPol, _ := newUneventfulNetworkPolicyController(podInformer, netpolInformer, nsInformer)
|
||||
tCreateFakePods(t, podInformer, nsInformer)
|
||||
for _, test := range testCases {
|
||||
test.netpol.createFakeNetpol(t, netpolInformer)
|
||||
netpols, err := krNetPol.buildNetworkPoliciesInfo()
|
||||
if err != nil {
|
||||
t.Errorf("Problems building policies: %s", err)
|
||||
}
|
||||
for _, np := range netpols {
|
||||
fmt.Print(np.policyType)
|
||||
if np.policyType == "egress" || np.policyType == "both" {
|
||||
err = krNetPol.processEgressRules(np, "", nil, "1")
|
||||
if err != nil {
|
||||
t.Errorf("Error syncing the rules: %s", err)
|
||||
}
|
||||
}
|
||||
if np.policyType == "ingress" || np.policyType == "both" {
|
||||
err = krNetPol.processIngressRules(np, "", nil, "1")
|
||||
if err != nil {
|
||||
t.Errorf("Error syncing the rules: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !bytes.Equal([]byte(test.expectedRule), krNetPol.filterTableRules.Bytes()) {
|
||||
t.Errorf("Invalid rule %s created:\nExpected:\n%s \nGot:\n%s", test.name, test.expectedRule, krNetPol.filterTableRules.String())
|
||||
}
|
||||
key := fmt.Sprintf("%s/%s", test.netpol.namespace, test.netpol.name)
|
||||
obj, exists, err := krNetPol.npLister.GetByKey(key)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to get Netpol from store: %s", err)
|
||||
}
|
||||
if exists {
|
||||
err = krNetPol.npLister.Delete(obj)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to remove Netpol from store: %s", err)
|
||||
}
|
||||
}
|
||||
krNetPol.filterTableRules.Reset()
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func Test_UnitNetworkPolicyController(t *testing.T) {
|
||||
testCases := []tNetPolConfigTestCase{
|
||||
{
|
||||
"Default options are successful",
|
||||
newMinimalNodeConfig("", "", "node", nil),
|
||||
false,
|
||||
"",
|
||||
},
|
||||
{
|
||||
"Missing nodename fails appropriately",
|
||||
newMinimalNodeConfig("", "", "", nil),
|
||||
true,
|
||||
"failed to identify the node by NODE_NAME, hostname or --hostname-override",
|
||||
},
|
||||
{
|
||||
"Test good cluster CIDR (using single IP with a /32)",
|
||||
newMinimalNodeConfig("10.10.10.10/32", "", "node", nil),
|
||||
false,
|
||||
"",
|
||||
},
|
||||
{
|
||||
"Test good cluster CIDR (using normal range with /24)",
|
||||
newMinimalNodeConfig("10.10.10.0/24", "", "node", nil),
|
||||
false,
|
||||
"",
|
||||
},
|
||||
{
|
||||
"Test good node port specification (using hyphen separator)",
|
||||
newMinimalNodeConfig("", "8080-8090", "node", nil),
|
||||
false,
|
||||
"",
|
||||
},
|
||||
{
|
||||
"Test good external IP CIDR (using single IP with a /32)",
|
||||
newMinimalNodeConfig("", "", "node", []string{"199.10.10.10/32"}),
|
||||
false,
|
||||
"",
|
||||
},
|
||||
{
|
||||
"Test good external IP CIDR (using normal range with /24)",
|
||||
newMinimalNodeConfig("", "", "node", []string{"199.10.10.10/24"}),
|
||||
false,
|
||||
"",
|
||||
},
|
||||
}
|
||||
client := fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*newFakeNode("node", "10.10.10.10")}})
|
||||
_, podInformer, nsInformer, netpolInformer := newFakeInformersFromClient(client)
|
||||
for _, test := range testCases {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
_, err := NewNetworkPolicyController(client, test.config, podInformer, netpolInformer, nsInformer, &sync.Mutex{})
|
||||
if err == nil && test.expectError {
|
||||
t.Error("This config should have failed, but it was successful instead")
|
||||
} else if err != nil {
|
||||
// Unfortunately without doing a ton of extra refactoring work, we can't remove this reference easily
|
||||
// from the controller's startup. Luckily it's one of the last items to be processed in the controller
|
||||
// so for now we'll consider that hitting this error is essentially the same as not hitting an error at all
|
||||
// TODO: refactor NPC to use an injectable interface for ipset operations
|
||||
if !test.expectError && err.Error() != "Ipset utility not found" {
|
||||
t.Errorf("This config should have been successful, but it failed instead. Error: %s", err)
|
||||
} else if test.expectError && err.Error() != test.errorText {
|
||||
t.Errorf("Expected error: '%s' but instead got: '%s'", test.errorText, err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Ref:
|
||||
// https://github.com/kubernetes/kubernetes/blob/master/pkg/controller/podgc/gc_controller_test.go
|
||||
// https://github.com/kubernetes/kubernetes/blob/master/pkg/controller/testutil/test_utils.go
|
@ -1,268 +0,0 @@
|
||||
// Apache License v2.0 (copyright Cloud Native Labs & Rancher Labs)
|
||||
// - modified from https://github.com/cloudnativelabs/kube-router/blob/73b1b03b32c5755b240f6c077bb097abe3888314/pkg/controllers/netpol/pod.go
|
||||
|
||||
// +build !windows
|
||||
|
||||
package netpol
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/base32"
|
||||
"strings"
|
||||
|
||||
api "k8s.io/api/core/v1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
func (npc *NetworkPolicyController) newPodEventHandler() cache.ResourceEventHandler {
|
||||
return cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: func(obj interface{}) {
|
||||
if podObj, ok := obj.(*api.Pod); ok {
|
||||
// If the pod isn't yet actionable there is no action to take here anyway, so skip it. When it becomes
|
||||
// actionable, we'll get an update below.
|
||||
if isNetPolActionable(podObj) {
|
||||
npc.OnPodUpdate(obj)
|
||||
}
|
||||
}
|
||||
},
|
||||
UpdateFunc: func(oldObj, newObj interface{}) {
|
||||
var newPodObj, oldPodObj *api.Pod
|
||||
var ok bool
|
||||
|
||||
// If either of these objects are not pods, quit now
|
||||
if newPodObj, ok = newObj.(*api.Pod); !ok {
|
||||
return
|
||||
}
|
||||
if oldPodObj, ok = oldObj.(*api.Pod); !ok {
|
||||
return
|
||||
}
|
||||
|
||||
// We don't check isNetPolActionable here, because if it is transitioning in or out of the actionable state
|
||||
// we want to run the full sync so that it can be added or removed from the existing network policy of the host
|
||||
// For the network policies, we are only interested in some changes; most pod changes aren't relevant to network policy
|
||||
if isPodUpdateNetPolRelevant(oldPodObj, newPodObj) {
|
||||
npc.OnPodUpdate(newObj)
|
||||
}
|
||||
},
|
||||
DeleteFunc: func(obj interface{}) {
|
||||
npc.handlePodDelete(obj)
|
||||
},
|
||||
}
|
||||
}
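// A usage sketch (assumed wiring; the informer registration happens in the agent start-up code,
// which is not part of this file):
//
//	podInformer.AddEventHandler(npc.newPodEventHandler())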
|
||||
|
||||
// OnPodUpdate handles updates to pods from the Kubernetes api server
|
||||
func (npc *NetworkPolicyController) OnPodUpdate(obj interface{}) {
|
||||
pod := obj.(*api.Pod)
|
||||
klog.V(2).Infof("Received update to pod: %s/%s", pod.Namespace, pod.Name)
|
||||
|
||||
npc.RequestFullSync()
|
||||
}
|
||||
|
||||
func (npc *NetworkPolicyController) handlePodDelete(obj interface{}) {
|
||||
pod, ok := obj.(*api.Pod)
|
||||
if !ok {
|
||||
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
|
||||
if !ok {
|
||||
klog.Errorf("unexpected object type: %v", obj)
|
||||
return
|
||||
}
|
||||
if pod, ok = tombstone.Obj.(*api.Pod); !ok {
|
||||
klog.Errorf("unexpected object type: %v", obj)
|
||||
return
|
||||
}
|
||||
}
|
||||
klog.V(2).Infof("Received pod: %s/%s delete event", pod.Namespace, pod.Name)
|
||||
|
||||
npc.RequestFullSync()
|
||||
}
|
||||
|
||||
func (npc *NetworkPolicyController) syncPodFirewallChains(networkPoliciesInfo []networkPolicyInfo, version string) (map[string]bool, error) {
|
||||
|
||||
activePodFwChains := make(map[string]bool)
|
||||
|
||||
dropUnmarkedTrafficRules := func(podName, podNamespace, podFwChainName string) error {
|
||||
// add rule to log the packets that will be dropped due to network policy enforcement
|
||||
comment := "\"rule to log dropped traffic POD name:" + podName + " namespace: " + podNamespace + "\""
|
||||
args := []string{"-A", podFwChainName, "-m", "comment", "--comment", comment, "-m", "mark", "!", "--mark", "0x10000/0x10000", "-j", "NFLOG", "--nflog-group", "100", "-m", "limit", "--limit", "10/minute", "--limit-burst", "10", "\n"}
|
||||
// This used to be AppendUnique when we were using iptables directly; this check makes sure we haven't already added the drop-unmarked rules for this chain
|
||||
if strings.Contains(npc.filterTableRules.String(), strings.Join(args, " ")) {
|
||||
return nil
|
||||
}
|
||||
npc.filterTableRules.WriteString(strings.Join(args, " "))
|
||||
|
||||
// add rule to REJECT the traffic if no applicable network policy permits it
|
||||
comment = "\"rule to REJECT traffic destined for POD name:" + podName + " namespace: " + podNamespace + "\""
|
||||
args = []string{"-A", podFwChainName, "-m", "comment", "--comment", comment, "-m", "mark", "!", "--mark", "0x10000/0x10000", "-j", "REJECT", "\n"}
|
||||
npc.filterTableRules.WriteString(strings.Join(args, " "))
|
||||
|
||||
// reset mark to let traffic pass through rest of the chains
|
||||
args = []string{"-A", podFwChainName, "-j", "MARK", "--set-mark", "0/0x10000", "\n"}
|
||||
npc.filterTableRules.WriteString(strings.Join(args, " "))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// loop through the pods running on the node
|
||||
allLocalPods, err := npc.getLocalPods(npc.nodeIP.String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, pod := range *allLocalPods {
|
||||
|
||||
// ensure a pod-specific firewall chain exists for all the pods that need an ingress firewall
|
||||
podFwChainName := podFirewallChainName(pod.namespace, pod.name, version)
|
||||
npc.filterTableRules.WriteString(":" + podFwChainName + "\n")
|
||||
|
||||
activePodFwChains[podFwChainName] = true
|
||||
|
||||
// setup rules to run through applicable ingress/egress network policies for the pod
|
||||
npc.setupPodNetpolRules(&pod, podFwChainName, networkPoliciesInfo, version)
|
||||
|
||||
// setup rules to intercept inbound traffic to the pods
|
||||
npc.interceptPodInboundTraffic(&pod, podFwChainName)
|
||||
|
||||
// setup rules to intercept outbound traffic from the pods
|
||||
npc.interceptPodOutboundTraffic(&pod, podFwChainName)
|
||||
|
||||
err = dropUnmarkedTrafficRules(pod.name, pod.namespace, podFwChainName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// set mark to indicate traffic from/to the pod passed network policies.
|
||||
// Mark will be checked to explicitly ACCEPT the traffic
|
||||
comment := "\"set mark to ACCEPT traffic that comply to network policies\""
|
||||
args := []string{"-A", podFwChainName, "-m", "comment", "--comment", comment, "-j", "MARK", "--set-mark", "0x20000/0x20000", "\n"}
|
||||
npc.filterTableRules.WriteString(strings.Join(args, " "))
|
||||
}
|
||||
|
||||
return activePodFwChains, nil
|
||||
}
|
||||
|
||||
// setup rules to jump to applicable network policy chains for the traffic from/to the pod
|
||||
func (npc *NetworkPolicyController) setupPodNetpolRules(pod *podInfo, podFwChainName string, networkPoliciesInfo []networkPolicyInfo, version string) {
|
||||
|
||||
hasIngressPolicy := false
|
||||
hasEgressPolicy := false
|
||||
|
||||
// add entries in pod firewall to run through applicable network policies
|
||||
for _, policy := range networkPoliciesInfo {
|
||||
if _, ok := policy.targetPods[pod.ip]; !ok {
|
||||
continue
|
||||
}
|
||||
comment := "\"run through nw policy " + policy.name + "\""
|
||||
policyChainName := networkPolicyChainName(policy.namespace, policy.name, version)
|
||||
var args []string
|
||||
switch policy.policyType {
|
||||
case "both":
|
||||
hasIngressPolicy = true
|
||||
hasEgressPolicy = true
|
||||
args = []string{"-I", podFwChainName, "1", "-m", "comment", "--comment", comment, "-j", policyChainName, "\n"}
|
||||
case "ingress":
|
||||
hasIngressPolicy = true
|
||||
args = []string{"-I", podFwChainName, "1", "-d", pod.ip, "-m", "comment", "--comment", comment, "-j", policyChainName, "\n"}
|
||||
case "egress":
|
||||
hasEgressPolicy = true
|
||||
args = []string{"-I", podFwChainName, "1", "-s", pod.ip, "-m", "comment", "--comment", comment, "-j", policyChainName, "\n"}
|
||||
}
|
||||
npc.filterTableRules.WriteString(strings.Join(args, " "))
|
||||
}
|
||||
|
||||
// if pod does not have any network policy which applies rules for pod's ingress traffic
|
||||
// then apply default network policy
|
||||
if !hasIngressPolicy {
|
||||
comment := "\"run through default ingress network policy chain\""
|
||||
args := []string{"-I", podFwChainName, "1", "-d", pod.ip, "-m", "comment", "--comment", comment, "-j", kubeDefaultNetpolChain, "\n"}
|
||||
npc.filterTableRules.WriteString(strings.Join(args, " "))
|
||||
}
|
||||
|
||||
// if pod does not have any network policy which applies rules for pod's egress traffic
|
||||
// then apply default network policy
|
||||
if !hasEgressPolicy {
|
||||
comment := "\"run through default egress network policy chain\""
|
||||
args := []string{"-I", podFwChainName, "1", "-s", pod.ip, "-m", "comment", "--comment", comment, "-j", kubeDefaultNetpolChain, "\n"}
|
||||
npc.filterTableRules.WriteString(strings.Join(args, " "))
|
||||
}
|
||||
|
||||
comment := "\"rule to permit the traffic traffic to pods when source is the pod's local node\""
|
||||
args := []string{"-I", podFwChainName, "1", "-m", "comment", "--comment", comment, "-m", "addrtype", "--src-type", "LOCAL", "-d", pod.ip, "-j", "ACCEPT", "\n"}
|
||||
npc.filterTableRules.WriteString(strings.Join(args, " "))
|
||||
|
||||
// ensure stateful firewall rule that permits RELATED,ESTABLISHED traffic from/to the pod
|
||||
comment = "\"rule for stateful firewall for pod\""
|
||||
args = []string{"-I", podFwChainName, "1", "-m", "comment", "--comment", comment, "-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED", "-j", "ACCEPT", "\n"}
|
||||
npc.filterTableRules.WriteString(strings.Join(args, " "))
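// Note: every rule in this function is inserted at position 1 ("-I <chain> 1"), so the rules
// written last are evaluated first; the stateful-firewall and local-node ACCEPT rules above end
// up ahead of the per-policy jump rules added earlier.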
|
||||
|
||||
}
|
||||
|
||||
func (npc *NetworkPolicyController) interceptPodInboundTraffic(pod *podInfo, podFwChainName string) {
|
||||
// ensure there is rule in filter table and FORWARD chain to jump to pod specific firewall chain
|
||||
// this rule applies to the traffic getting routed (coming for other node pods)
|
||||
comment := "\"rule to jump traffic destined to POD name:" + pod.name + " namespace: " + pod.namespace +
|
||||
" to chain " + podFwChainName + "\""
|
||||
args := []string{"-I", kubeForwardChainName, "1", "-m", "comment", "--comment", comment, "-d", pod.ip, "-j", podFwChainName + "\n"}
|
||||
npc.filterTableRules.WriteString(strings.Join(args, " "))
|
||||
|
||||
// ensure there is rule in filter table and OUTPUT chain to jump to pod specific firewall chain
|
||||
// this rule applies to the traffic from a pod getting routed back to another pod on same node by service proxy
|
||||
args = []string{"-I", kubeOutputChainName, "1", "-m", "comment", "--comment", comment, "-d", pod.ip, "-j", podFwChainName + "\n"}
|
||||
npc.filterTableRules.WriteString(strings.Join(args, " "))
|
||||
|
||||
// ensure there is rule in filter table and forward chain to jump to pod specific firewall chain
|
||||
// this rule applies to the traffic getting switched (coming for same node pods)
|
||||
comment = "\"rule to jump traffic destined to POD name:" + pod.name + " namespace: " + pod.namespace +
|
||||
" to chain " + podFwChainName + "\""
|
||||
args = []string{"-I", kubeForwardChainName, "1", "-m", "physdev", "--physdev-is-bridged",
|
||||
"-m", "comment", "--comment", comment,
|
||||
"-d", pod.ip,
|
||||
"-j", podFwChainName, "\n"}
|
||||
npc.filterTableRules.WriteString(strings.Join(args, " "))
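// The physdev/--physdev-is-bridged match above catches traffic that is switched on the node's
// bridge rather than routed, so same-node pod-to-pod traffic is also sent through the pod
// firewall chain and not only the routed traffic handled by the first FORWARD rule.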
|
||||
}
|
||||
|
||||
// setup iptable rules to intercept outbound traffic from pods and run it across the
|
||||
// firewall chain corresponding to the pod so that egress network policies are enforced
|
||||
func (npc *NetworkPolicyController) interceptPodOutboundTraffic(pod *podInfo, podFwChainName string) {
|
||||
egressFilterChains := []string{kubeInputChainName, kubeForwardChainName, kubeOutputChainName}
|
||||
for _, chain := range egressFilterChains {
|
||||
// ensure there is rule in filter table and FORWARD chain to jump to pod specific firewall chain
|
||||
// this rule applies to the traffic getting forwarded/routed (traffic from the pod destined
|
||||
// to pod on a different node)
|
||||
comment := "\"rule to jump traffic from POD name:" + pod.name + " namespace: " + pod.namespace +
|
||||
" to chain " + podFwChainName + "\""
|
||||
args := []string{"-I", chain, "1", "-m", "comment", "--comment", comment, "-s", pod.ip, "-j", podFwChainName, "\n"}
|
||||
npc.filterTableRules.WriteString(strings.Join(args, " "))
|
||||
}
|
||||
|
||||
// ensure there is rule in filter table and forward chain to jump to pod specific firewall chain
|
||||
// this rule applies to the traffic getting switched (coming for same node pods)
|
||||
comment := "\"rule to jump traffic from POD name:" + pod.name + " namespace: " + pod.namespace +
|
||||
" to chain " + podFwChainName + "\""
|
||||
args := []string{"-I", kubeForwardChainName, "1", "-m", "physdev", "--physdev-is-bridged",
|
||||
"-m", "comment", "--comment", comment,
|
||||
"-s", pod.ip,
|
||||
"-j", podFwChainName, "\n"}
|
||||
npc.filterTableRules.WriteString(strings.Join(args, " "))
|
||||
}
|
||||
|
||||
func (npc *NetworkPolicyController) getLocalPods(nodeIP string) (*map[string]podInfo, error) {
|
||||
localPods := make(map[string]podInfo)
|
||||
for _, obj := range npc.podLister.List() {
|
||||
pod := obj.(*api.Pod)
|
||||
// ignore pods running on a different node and pods that are not actionable
|
||||
if strings.Compare(pod.Status.HostIP, nodeIP) != 0 || !isNetPolActionable(pod) {
|
||||
continue
|
||||
}
|
||||
localPods[pod.Status.PodIP] = podInfo{ip: pod.Status.PodIP,
|
||||
name: pod.ObjectMeta.Name,
|
||||
namespace: pod.ObjectMeta.Namespace,
|
||||
labels: pod.ObjectMeta.Labels}
|
||||
}
|
||||
return &localPods, nil
|
||||
}
|
||||
|
||||
func podFirewallChainName(namespace, podName string, version string) string {
|
||||
hash := sha256.Sum256([]byte(namespace + podName + version))
|
||||
encoded := base32.StdEncoding.EncodeToString(hash[:])
|
||||
return kubePodFirewallChainPrefix + encoded[:16]
|
||||
}
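// Example (hypothetical values): podFirewallChainName("nsA", "Aa", "1") hashes the concatenated
// inputs with SHA-256 and keeps the first 16 base32 characters, yielding a deterministic,
// length-bounded chain name such as kubePodFirewallChainPrefix + "ABCDEFGHIJKLMNOP".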
|
@ -1,805 +0,0 @@
|
||||
// Apache License v2.0 (copyright Cloud Native Labs & Rancher Labs)
|
||||
// - modified from https://github.com/cloudnativelabs/kube-router/blob/73b1b03b32c5755b240f6c077bb097abe3888314/pkg/controllers/netpol/policy.go
|
||||
|
||||
// +build !windows
|
||||
|
||||
package netpol
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/base32"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rancher/k3s/pkg/agent/netpol/utils"
|
||||
api "k8s.io/api/core/v1"
|
||||
networking "k8s.io/api/networking/v1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
listers "k8s.io/client-go/listers/core/v1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
func (npc *NetworkPolicyController) newNetworkPolicyEventHandler() cache.ResourceEventHandler {
|
||||
return cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: func(obj interface{}) {
|
||||
npc.OnNetworkPolicyUpdate(obj)
|
||||
|
||||
},
|
||||
UpdateFunc: func(oldObj, newObj interface{}) {
|
||||
npc.OnNetworkPolicyUpdate(newObj)
|
||||
},
|
||||
DeleteFunc: func(obj interface{}) {
|
||||
npc.handleNetworkPolicyDelete(obj)
|
||||
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// OnNetworkPolicyUpdate handles updates to network policy from the kubernetes api server
|
||||
func (npc *NetworkPolicyController) OnNetworkPolicyUpdate(obj interface{}) {
|
||||
netpol := obj.(*networking.NetworkPolicy)
|
||||
klog.V(2).Infof("Received update for network policy: %s/%s", netpol.Namespace, netpol.Name)
|
||||
|
||||
npc.RequestFullSync()
|
||||
}
|
||||
|
||||
func (npc *NetworkPolicyController) handleNetworkPolicyDelete(obj interface{}) {
|
||||
netpol, ok := obj.(*networking.NetworkPolicy)
|
||||
if !ok {
|
||||
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
|
||||
if !ok {
|
||||
klog.Errorf("unexpected object type: %v", obj)
|
||||
return
|
||||
}
|
||||
if netpol, ok = tombstone.Obj.(*networking.NetworkPolicy); !ok {
|
||||
klog.Errorf("unexpected object type: %v", obj)
|
||||
return
|
||||
}
|
||||
}
|
||||
klog.V(2).Infof("Received network policy: %s/%s delete event", netpol.Namespace, netpol.Name)
|
||||
|
||||
npc.RequestFullSync()
|
||||
}
|
||||
|
||||
// Configure iptables rules representing each network policy. All pods matched by
|
||||
// network policy spec podselector labels are grouped together in one ipset which
|
||||
// is used for matching destination ip address. Each ingress rule in the network
|
||||
// policy spec is evaluated to a set of matching pods, which are grouped into an
|
||||
// ipset used for source ip addr matching.
|
||||
func (npc *NetworkPolicyController) syncNetworkPolicyChains(networkPoliciesInfo []networkPolicyInfo, version string) (map[string]bool, map[string]bool, error) {
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
endTime := time.Since(start)
|
||||
klog.V(2).Infof("Syncing network policy chains took %v", endTime)
|
||||
}()
|
||||
|
||||
klog.V(1).Infof("Attempting to attain ipset mutex lock")
|
||||
npc.ipsetMutex.Lock()
|
||||
klog.V(1).Infof("Attained ipset mutex lock, continuing...")
|
||||
defer func() {
|
||||
npc.ipsetMutex.Unlock()
|
||||
klog.V(1).Infof("Returned ipset mutex lock")
|
||||
}()
|
||||
|
||||
ipset, err := utils.NewIPSet(false)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
err = ipset.Save()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
npc.ipSetHandler = ipset
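// The handler is assumed to follow a save/stage/restore cycle (based on how it is used in this
// function): Save() captures the ipsets currently on the node, RefreshSet() stages the desired
// entries for each set, and the single Restore() call at the end applies everything at once.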
|
||||
|
||||
activePolicyChains := make(map[string]bool)
|
||||
activePolicyIPSets := make(map[string]bool)
|
||||
|
||||
// run through all network policies
|
||||
for _, policy := range networkPoliciesInfo {
|
||||
|
||||
// ensure there is a unique chain per network policy in filter table
|
||||
policyChainName := networkPolicyChainName(policy.namespace, policy.name, version)
|
||||
npc.filterTableRules.WriteString(":" + policyChainName + "\n")
|
||||
|
||||
activePolicyChains[policyChainName] = true
|
||||
|
||||
currentPodIPs := make([]string, 0, len(policy.targetPods))
|
||||
for ip := range policy.targetPods {
|
||||
currentPodIPs = append(currentPodIPs, ip)
|
||||
}
|
||||
|
||||
if policy.policyType == "both" || policy.policyType == "ingress" {
|
||||
// create an ipset for all destination pod IPs matched by the policy spec PodSelector
|
||||
targetDestPodIPSetName := policyDestinationPodIPSetName(policy.namespace, policy.name)
|
||||
setEntries := make([][]string, 0)
|
||||
for _, podIP := range currentPodIPs {
|
||||
setEntries = append(setEntries, []string{podIP, utils.OptionTimeout, "0"})
|
||||
}
|
||||
npc.ipSetHandler.RefreshSet(targetDestPodIPSetName, setEntries, utils.TypeHashIP)
|
||||
err = npc.processIngressRules(policy, targetDestPodIPSetName, activePolicyIPSets, version)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
activePolicyIPSets[targetDestPodIPSetName] = true
|
||||
}
|
||||
if policy.policyType == "both" || policy.policyType == "egress" {
|
||||
// create an ipset for all source pod IPs matched by the policy spec PodSelector
|
||||
targetSourcePodIPSetName := policySourcePodIPSetName(policy.namespace, policy.name)
|
||||
setEntries := make([][]string, 0)
|
||||
for _, podIP := range currentPodIPs {
|
||||
setEntries = append(setEntries, []string{podIP, utils.OptionTimeout, "0"})
|
||||
}
|
||||
npc.ipSetHandler.RefreshSet(targetSourcePodIPSetName, setEntries, utils.TypeHashIP)
|
||||
err = npc.processEgressRules(policy, targetSourcePodIPSetName, activePolicyIPSets, version)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
activePolicyIPSets[targetSourcePodIPSetName] = true
|
||||
}
|
||||
}
|
||||
|
||||
err = npc.ipSetHandler.Restore()
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to perform ipset restore: %s", err.Error())
|
||||
}
|
||||
|
||||
klog.V(2).Infof("Iptables chains in the filter table are synchronized with the network policies.")
|
||||
|
||||
return activePolicyChains, activePolicyIPSets, nil
|
||||
}
|
||||
|
||||
func (npc *NetworkPolicyController) processIngressRules(policy networkPolicyInfo,
|
||||
targetDestPodIPSetName string, activePolicyIPSets map[string]bool, version string) error {
|
||||
|
||||
// From network policy spec: "If field 'Ingress' is empty then this NetworkPolicy does not allow any traffic "
|
||||
// so no whitelist rules to be added to the network policy
|
||||
if policy.ingressRules == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
policyChainName := networkPolicyChainName(policy.namespace, policy.name, version)
|
||||
|
||||
// run through all the ingress rules in the spec and create iptables rules
|
||||
// in the chain for the network policy
|
||||
for i, ingressRule := range policy.ingressRules {
|
||||
|
||||
if len(ingressRule.srcPods) != 0 {
|
||||
srcPodIPSetName := policyIndexedSourcePodIPSetName(policy.namespace, policy.name, i)
|
||||
activePolicyIPSets[srcPodIPSetName] = true
|
||||
setEntries := make([][]string, 0)
|
||||
for _, pod := range ingressRule.srcPods {
|
||||
setEntries = append(setEntries, []string{pod.ip, utils.OptionTimeout, "0"})
|
||||
}
|
||||
npc.ipSetHandler.RefreshSet(srcPodIPSetName, setEntries, utils.TypeHashIP)
|
||||
|
||||
if len(ingressRule.ports) != 0 {
|
||||
// case where 'ports' details and 'from' details specified in the ingress rule
|
||||
// so match on specified source and destination ip's and specified port (if any) and protocol
|
||||
for _, portProtocol := range ingressRule.ports {
|
||||
comment := "rule to ACCEPT traffic from source pods to dest pods selected by policy name " +
|
||||
policy.name + " namespace " + policy.namespace
|
||||
if err := npc.appendRuleToPolicyChain(policyChainName, comment, srcPodIPSetName, targetDestPodIPSetName, portProtocol.protocol, portProtocol.port, portProtocol.endport); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(ingressRule.namedPorts) != 0 {
|
||||
for j, endPoints := range ingressRule.namedPorts {
|
||||
namedPortIPSetName := policyIndexedIngressNamedPortIPSetName(policy.namespace, policy.name, i, j)
|
||||
activePolicyIPSets[namedPortIPSetName] = true
|
||||
setEntries := make([][]string, 0)
|
||||
for _, ip := range endPoints.ips {
|
||||
setEntries = append(setEntries, []string{ip, utils.OptionTimeout, "0"})
|
||||
}
|
||||
npc.ipSetHandler.RefreshSet(namedPortIPSetName, setEntries, utils.TypeHashIP)
|
||||
|
||||
comment := "rule to ACCEPT traffic from source pods to dest pods selected by policy name " +
|
||||
policy.name + " namespace " + policy.namespace
|
||||
if err := npc.appendRuleToPolicyChain(policyChainName, comment, srcPodIPSetName, namedPortIPSetName, endPoints.protocol, endPoints.port, endPoints.endport); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(ingressRule.ports) == 0 && len(ingressRule.namedPorts) == 0 {
|
||||
// case where no 'ports' details specified in the ingress rule but 'from' details specified
|
||||
// so match on specified source and destination ip with all port and protocol
|
||||
comment := "rule to ACCEPT traffic from source pods to dest pods selected by policy name " +
|
||||
policy.name + " namespace " + policy.namespace
|
||||
if err := npc.appendRuleToPolicyChain(policyChainName, comment, srcPodIPSetName, targetDestPodIPSetName, "", "", ""); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// case where only 'ports' details specified but no 'from' details in the ingress rule
|
||||
// so match on all sources, with specified port (if any) and protocol
|
||||
if ingressRule.matchAllSource && !ingressRule.matchAllPorts {
|
||||
for _, portProtocol := range ingressRule.ports {
|
||||
comment := "rule to ACCEPT traffic from all sources to dest pods selected by policy name: " +
|
||||
policy.name + " namespace " + policy.namespace
|
||||
if err := npc.appendRuleToPolicyChain(policyChainName, comment, "", targetDestPodIPSetName, portProtocol.protocol, portProtocol.port, portProtocol.endport); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for j, endPoints := range ingressRule.namedPorts {
|
||||
namedPortIPSetName := policyIndexedIngressNamedPortIPSetName(policy.namespace, policy.name, i, j)
|
||||
activePolicyIPSets[namedPortIPSetName] = true
|
||||
setEntries := make([][]string, 0)
|
||||
for _, ip := range endPoints.ips {
|
||||
setEntries = append(setEntries, []string{ip, utils.OptionTimeout, "0"})
|
||||
}
|
||||
npc.ipSetHandler.RefreshSet(namedPortIPSetName, setEntries, utils.TypeHashIP)
|
||||
|
||||
comment := "rule to ACCEPT traffic from all sources to dest pods selected by policy name: " +
|
||||
policy.name + " namespace " + policy.namespace
|
||||
if err := npc.appendRuleToPolicyChain(policyChainName, comment, "", namedPortIPSetName, endPoints.protocol, endPoints.port, endPoints.endport); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// case where neither ports nor from details are specified in the ingress rule
|
||||
// so match on all ports, protocols, and source IPs
|
||||
if ingressRule.matchAllSource && ingressRule.matchAllPorts {
|
||||
comment := "rule to ACCEPT traffic from all sources to dest pods selected by policy name: " +
|
||||
policy.name + " namespace " + policy.namespace
|
||||
if err := npc.appendRuleToPolicyChain(policyChainName, comment, "", targetDestPodIPSetName, "", "", ""); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if len(ingressRule.srcIPBlocks) != 0 {
|
||||
srcIPBlockIPSetName := policyIndexedSourceIPBlockIPSetName(policy.namespace, policy.name, i)
|
||||
activePolicyIPSets[srcIPBlockIPSetName] = true
|
||||
npc.ipSetHandler.RefreshSet(srcIPBlockIPSetName, ingressRule.srcIPBlocks, utils.TypeHashNet)
|
||||
|
||||
if !ingressRule.matchAllPorts {
|
||||
for _, portProtocol := range ingressRule.ports {
|
||||
comment := "rule to ACCEPT traffic from specified ipBlocks to dest pods selected by policy name: " +
|
||||
policy.name + " namespace " + policy.namespace
|
||||
if err := npc.appendRuleToPolicyChain(policyChainName, comment, srcIPBlockIPSetName, targetDestPodIPSetName, portProtocol.protocol, portProtocol.port, portProtocol.endport); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for j, endPoints := range ingressRule.namedPorts {
|
||||
namedPortIPSetName := policyIndexedIngressNamedPortIPSetName(policy.namespace, policy.name, i, j)
|
||||
activePolicyIPSets[namedPortIPSetName] = true
|
||||
setEntries := make([][]string, 0)
|
||||
for _, ip := range endPoints.ips {
|
||||
setEntries = append(setEntries, []string{ip, utils.OptionTimeout, "0"})
|
||||
}
|
||||
npc.ipSetHandler.RefreshSet(namedPortIPSetName, setEntries, utils.TypeHashNet)
|
||||
comment := "rule to ACCEPT traffic from specified ipBlocks to dest pods selected by policy name: " +
|
||||
policy.name + " namespace " + policy.namespace
|
||||
if err := npc.appendRuleToPolicyChain(policyChainName, comment, srcIPBlockIPSetName, namedPortIPSetName, endPoints.protocol, endPoints.port, endPoints.endport); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if ingressRule.matchAllPorts {
|
||||
comment := "rule to ACCEPT traffic from specified ipBlocks to dest pods selected by policy name: " +
|
||||
policy.name + " namespace " + policy.namespace
|
||||
if err := npc.appendRuleToPolicyChain(policyChainName, comment, srcIPBlockIPSetName, targetDestPodIPSetName, "", "", ""); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (npc *NetworkPolicyController) processEgressRules(policy networkPolicyInfo,
|
||||
targetSourcePodIPSetName string, activePolicyIPSets map[string]bool, version string) error {
|
||||
|
||||
// From network policy spec: "If field 'Ingress' is empty then this NetworkPolicy does not allow any traffic "
|
||||
// so no whitelist rules to be added to the network policy
|
||||
if policy.egressRules == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
policyChainName := networkPolicyChainName(policy.namespace, policy.name, version)
|
||||
|
||||
// run through all the egress rules in the spec and create iptables rules
|
||||
// in the chain for the network policy
|
||||
for i, egressRule := range policy.egressRules {
|
||||
|
||||
if len(egressRule.dstPods) != 0 {
|
||||
dstPodIPSetName := policyIndexedDestinationPodIPSetName(policy.namespace, policy.name, i)
|
||||
activePolicyIPSets[dstPodIPSetName] = true
|
||||
setEntries := make([][]string, 0)
|
||||
for _, pod := range egressRule.dstPods {
|
||||
setEntries = append(setEntries, []string{pod.ip, utils.OptionTimeout, "0"})
|
||||
}
|
||||
npc.ipSetHandler.RefreshSet(dstPodIPSetName, setEntries, utils.TypeHashIP)
|
||||
if len(egressRule.ports) != 0 {
|
||||
// case where 'ports' details and 'to' details specified in the egress rule
|
||||
// so match on specified source and destination ip's and specified port (if any) and protocol
|
||||
for _, portProtocol := range egressRule.ports {
|
||||
comment := "rule to ACCEPT traffic from source pods to dest pods selected by policy name " +
|
||||
policy.name + " namespace " + policy.namespace
|
||||
if err := npc.appendRuleToPolicyChain(policyChainName, comment, targetSourcePodIPSetName, dstPodIPSetName, portProtocol.protocol, portProtocol.port, portProtocol.endport); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(egressRule.namedPorts) != 0 {
|
||||
for j, endPoints := range egressRule.namedPorts {
|
||||
namedPortIPSetName := policyIndexedEgressNamedPortIPSetName(policy.namespace, policy.name, i, j)
|
||||
activePolicyIPSets[namedPortIPSetName] = true
|
||||
setEntries := make([][]string, 0)
|
||||
for _, ip := range endPoints.ips {
|
||||
setEntries = append(setEntries, []string{ip, utils.OptionTimeout, "0"})
|
||||
}
|
||||
npc.ipSetHandler.RefreshSet(namedPortIPSetName, setEntries, utils.TypeHashIP)
|
||||
comment := "rule to ACCEPT traffic from source pods to dest pods selected by policy name " +
|
||||
policy.name + " namespace " + policy.namespace
|
||||
if err := npc.appendRuleToPolicyChain(policyChainName, comment, targetSourcePodIPSetName, namedPortIPSetName, endPoints.protocol, endPoints.port, endPoints.endport); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if len(egressRule.ports) == 0 && len(egressRule.namedPorts) == 0 {
|
||||
// case where no 'ports' details specified in the egress rule but 'to' details specified
|
||||
// so match on the specified source and destination IPs with all ports and protocols
|
||||
comment := "rule to ACCEPT traffic from source pods to dest pods selected by policy name " +
|
||||
policy.name + " namespace " + policy.namespace
|
||||
if err := npc.appendRuleToPolicyChain(policyChainName, comment, targetSourcePodIPSetName, dstPodIPSetName, "", "", ""); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// case where only 'ports' details specified but no 'to' details in the egress rule
|
||||
// so match on selected source pods and all destinations, with specified port (if any) and protocol
|
||||
if egressRule.matchAllDestinations && !egressRule.matchAllPorts {
|
||||
for _, portProtocol := range egressRule.ports {
|
||||
comment := "rule to ACCEPT traffic from source pods to all destinations selected by policy name: " +
|
||||
policy.name + " namespace " + policy.namespace
|
||||
if err := npc.appendRuleToPolicyChain(policyChainName, comment, targetSourcePodIPSetName, "", portProtocol.protocol, portProtocol.port, portProtocol.endport); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
for _, portProtocol := range egressRule.namedPorts {
|
||||
comment := "rule to ACCEPT traffic from source pods to all destinations selected by policy name: " +
|
||||
policy.name + " namespace " + policy.namespace
|
||||
if err := npc.appendRuleToPolicyChain(policyChainName, comment, targetSourcePodIPSetName, "", portProtocol.protocol, portProtocol.port, portProtocol.endport); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// case where neither 'ports' nor 'to' details are specified in the egress rule
|
||||
// so match on all ports, protocols, and destination IPs
|
||||
if egressRule.matchAllDestinations && egressRule.matchAllPorts {
|
||||
comment := "rule to ACCEPT traffic from source pods to all destinations selected by policy name: " +
|
||||
policy.name + " namespace " + policy.namespace
|
||||
if err := npc.appendRuleToPolicyChain(policyChainName, comment, targetSourcePodIPSetName, "", "", "", ""); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if len(egressRule.dstIPBlocks) != 0 {
|
||||
dstIPBlockIPSetName := policyIndexedDestinationIPBlockIPSetName(policy.namespace, policy.name, i)
|
||||
activePolicyIPSets[dstIPBlockIPSetName] = true
|
||||
npc.ipSetHandler.RefreshSet(dstIPBlockIPSetName, egressRule.dstIPBlocks, utils.TypeHashNet)
|
||||
if !egressRule.matchAllPorts {
|
||||
for _, portProtocol := range egressRule.ports {
|
||||
comment := "rule to ACCEPT traffic from source pods to specified ipBlocks selected by policy name: " +
|
||||
policy.name + " namespace " + policy.namespace
|
||||
if err := npc.appendRuleToPolicyChain(policyChainName, comment, targetSourcePodIPSetName, dstIPBlockIPSetName, portProtocol.protocol, portProtocol.port, portProtocol.endport); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if egressRule.matchAllPorts {
|
||||
comment := "rule to ACCEPT traffic from source pods to specified ipBlocks selected by policy name: " +
|
||||
policy.name + " namespace " + policy.namespace
|
||||
if err := npc.appendRuleToPolicyChain(policyChainName, comment, targetSourcePodIPSetName, dstIPBlockIPSetName, "", "", ""); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (npc *NetworkPolicyController) appendRuleToPolicyChain(policyChainName, comment, srcIPSetName, dstIPSetName, protocol, dPort, endDport string) error {
|
||||
|
||||
args := make([]string, 0)
|
||||
args = append(args, "-A", policyChainName)
|
||||
|
||||
if comment != "" {
|
||||
args = append(args, "-m", "comment", "--comment", "\""+comment+"\"")
|
||||
}
|
||||
if srcIPSetName != "" {
|
||||
args = append(args, "-m", "set", "--match-set", srcIPSetName, "src")
|
||||
}
|
||||
if dstIPSetName != "" {
|
||||
args = append(args, "-m", "set", "--match-set", dstIPSetName, "dst")
|
||||
}
|
||||
if protocol != "" {
|
||||
args = append(args, "-p", protocol)
|
||||
}
|
||||
if dPort != "" {
|
||||
if endDport != "" {
|
||||
multiport := fmt.Sprintf("%s:%s", dPort, endDport)
|
||||
args = append(args, "--dport", multiport)
|
||||
} else {
|
||||
args = append(args, "--dport", dPort)
|
||||
}
|
||||
}
|
||||
|
||||
markArgs := append(args, "-j", "MARK", "--set-xmark", "0x10000/0x10000", "\n")
|
||||
npc.filterTableRules.WriteString(strings.Join(markArgs, " "))
|
||||
|
||||
returnArgs := append(args, "-m", "mark", "--mark", "0x10000/0x10000", "-j", "RETURN", "\n")
|
||||
npc.filterTableRules.WriteString(strings.Join(returnArgs, " "))
|
||||
|
||||
return nil
|
||||
}
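// Hedged illustration (not part of the original change): with hypothetical chain and
// ipset names, an allow rule for TCP port 80 is emitted into npc.filterTableRules as a
// mark rule followed by a return rule, roughly:
//
//	-A KUBE-NWPLCY-EXAMPLE -m comment --comment "allow web" -m set --match-set KUBE-SRC-EXAMPLE src -m set --match-set KUBE-DST-EXAMPLE dst -p tcp --dport 80 -j MARK --set-xmark 0x10000/0x10000
//	-A KUBE-NWPLCY-EXAMPLE -m comment --comment "allow web" -m set --match-set KUBE-SRC-EXAMPLE src -m set --match-set KUBE-DST-EXAMPLE dst -p tcp --dport 80 -m mark --mark 0x10000/0x10000 -j RETURN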
|
||||
|
||||
func (npc *NetworkPolicyController) buildNetworkPoliciesInfo() ([]networkPolicyInfo, error) {
|
||||
|
||||
NetworkPolicies := make([]networkPolicyInfo, 0)
|
||||
|
||||
for _, policyObj := range npc.npLister.List() {
|
||||
|
||||
policy, ok := policyObj.(*networking.NetworkPolicy)
|
||||
// check the type assertion before dereferencing policy to avoid a nil pointer panic
if !ok {
|
||||
return nil, fmt.Errorf("failed to convert")
|
||||
}
|
||||
podSelector, _ := v1.LabelSelectorAsSelector(&policy.Spec.PodSelector)
|
||||
newPolicy := networkPolicyInfo{
|
||||
name: policy.Name,
|
||||
namespace: policy.Namespace,
|
||||
podSelector: podSelector,
|
||||
policyType: "ingress",
|
||||
}
|
||||
|
||||
ingressType, egressType := false, false
|
||||
for _, policyType := range policy.Spec.PolicyTypes {
|
||||
if policyType == networking.PolicyTypeIngress {
|
||||
ingressType = true
|
||||
}
|
||||
if policyType == networking.PolicyTypeEgress {
|
||||
egressType = true
|
||||
}
|
||||
}
|
||||
if ingressType && egressType {
|
||||
newPolicy.policyType = "both"
|
||||
} else if egressType {
|
||||
newPolicy.policyType = "egress"
|
||||
} else if ingressType {
|
||||
newPolicy.policyType = "ingress"
|
||||
}
|
||||
|
||||
matchingPods, err := npc.ListPodsByNamespaceAndLabels(policy.Namespace, podSelector)
|
||||
newPolicy.targetPods = make(map[string]podInfo)
|
||||
namedPort2IngressEps := make(namedPort2eps)
|
||||
if err == nil {
|
||||
for _, matchingPod := range matchingPods {
|
||||
if !isNetPolActionable(matchingPod) {
|
||||
continue
|
||||
}
|
||||
newPolicy.targetPods[matchingPod.Status.PodIP] = podInfo{ip: matchingPod.Status.PodIP,
|
||||
name: matchingPod.ObjectMeta.Name,
|
||||
namespace: matchingPod.ObjectMeta.Namespace,
|
||||
labels: matchingPod.ObjectMeta.Labels}
|
||||
npc.grabNamedPortFromPod(matchingPod, &namedPort2IngressEps)
|
||||
}
|
||||
}
|
||||
|
||||
if policy.Spec.Ingress == nil {
|
||||
newPolicy.ingressRules = nil
|
||||
} else {
|
||||
newPolicy.ingressRules = make([]ingressRule, 0)
|
||||
}
|
||||
|
||||
if policy.Spec.Egress == nil {
|
||||
newPolicy.egressRules = nil
|
||||
} else {
|
||||
newPolicy.egressRules = make([]egressRule, 0)
|
||||
}
|
||||
|
||||
for _, specIngressRule := range policy.Spec.Ingress {
|
||||
ingressRule := ingressRule{}
|
||||
ingressRule.srcPods = make([]podInfo, 0)
|
||||
ingressRule.srcIPBlocks = make([][]string, 0)
|
||||
|
||||
// If this field is empty or missing in the spec, this rule matches all sources
|
||||
if len(specIngressRule.From) == 0 {
|
||||
ingressRule.matchAllSource = true
|
||||
} else {
|
||||
ingressRule.matchAllSource = false
|
||||
for _, peer := range specIngressRule.From {
|
||||
if peerPods, err := npc.evalPodPeer(policy, peer); err == nil {
|
||||
for _, peerPod := range peerPods {
|
||||
if !isNetPolActionable(peerPod) {
|
||||
continue
|
||||
}
|
||||
ingressRule.srcPods = append(ingressRule.srcPods,
|
||||
podInfo{ip: peerPod.Status.PodIP,
|
||||
name: peerPod.ObjectMeta.Name,
|
||||
namespace: peerPod.ObjectMeta.Namespace,
|
||||
labels: peerPod.ObjectMeta.Labels})
|
||||
}
|
||||
}
|
||||
ingressRule.srcIPBlocks = append(ingressRule.srcIPBlocks, npc.evalIPBlockPeer(peer)...)
|
||||
}
|
||||
}
|
||||
|
||||
ingressRule.ports = make([]protocolAndPort, 0)
|
||||
ingressRule.namedPorts = make([]endPoints, 0)
|
||||
// If this field is empty or missing in the spec, this rule matches all ports
|
||||
if len(specIngressRule.Ports) == 0 {
|
||||
ingressRule.matchAllPorts = true
|
||||
} else {
|
||||
ingressRule.matchAllPorts = false
|
||||
ingressRule.ports, ingressRule.namedPorts = npc.processNetworkPolicyPorts(specIngressRule.Ports, namedPort2IngressEps)
|
||||
}
|
||||
|
||||
newPolicy.ingressRules = append(newPolicy.ingressRules, ingressRule)
|
||||
}
|
||||
|
||||
for _, specEgressRule := range policy.Spec.Egress {
|
||||
egressRule := egressRule{}
|
||||
egressRule.dstPods = make([]podInfo, 0)
|
||||
egressRule.dstIPBlocks = make([][]string, 0)
|
||||
namedPort2EgressEps := make(namedPort2eps)
|
||||
|
||||
// If this field is empty or missing in the spec, this rule matches all destinations
|
||||
if len(specEgressRule.To) == 0 {
|
||||
egressRule.matchAllDestinations = true
|
||||
// if rule.To is empty but rule.Ports is not, we must try to grab the NamedPort from pods in the same namespace,
|
||||
// so that we can build an iptables rule describing a "match all dst but match some named dst-port" egress rule
|
||||
if policyRulePortsHasNamedPort(specEgressRule.Ports) {
|
||||
matchingPeerPods, _ := npc.ListPodsByNamespaceAndLabels(policy.Namespace, labels.Everything())
|
||||
for _, peerPod := range matchingPeerPods {
|
||||
if !isNetPolActionable(peerPod) {
|
||||
continue
|
||||
}
|
||||
npc.grabNamedPortFromPod(peerPod, &namedPort2EgressEps)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
egressRule.matchAllDestinations = false
|
||||
for _, peer := range specEgressRule.To {
|
||||
if peerPods, err := npc.evalPodPeer(policy, peer); err == nil {
|
||||
for _, peerPod := range peerPods {
|
||||
if !isNetPolActionable(peerPod) {
|
||||
continue
|
||||
}
|
||||
egressRule.dstPods = append(egressRule.dstPods,
|
||||
podInfo{ip: peerPod.Status.PodIP,
|
||||
name: peerPod.ObjectMeta.Name,
|
||||
namespace: peerPod.ObjectMeta.Namespace,
|
||||
labels: peerPod.ObjectMeta.Labels})
|
||||
npc.grabNamedPortFromPod(peerPod, &namedPort2EgressEps)
|
||||
}
|
||||
|
||||
}
|
||||
egressRule.dstIPBlocks = append(egressRule.dstIPBlocks, npc.evalIPBlockPeer(peer)...)
|
||||
}
|
||||
}
|
||||
|
||||
egressRule.ports = make([]protocolAndPort, 0)
|
||||
egressRule.namedPorts = make([]endPoints, 0)
|
||||
// If this field is empty or missing in the spec, this rule matches all ports
|
||||
if len(specEgressRule.Ports) == 0 {
|
||||
egressRule.matchAllPorts = true
|
||||
} else {
|
||||
egressRule.matchAllPorts = false
|
||||
egressRule.ports, egressRule.namedPorts = npc.processNetworkPolicyPorts(specEgressRule.Ports, namedPort2EgressEps)
|
||||
}
|
||||
|
||||
newPolicy.egressRules = append(newPolicy.egressRules, egressRule)
|
||||
}
|
||||
NetworkPolicies = append(NetworkPolicies, newPolicy)
|
||||
}
|
||||
|
||||
return NetworkPolicies, nil
|
||||
}
|
||||
|
||||
func (npc *NetworkPolicyController) evalPodPeer(policy *networking.NetworkPolicy, peer networking.NetworkPolicyPeer) ([]*api.Pod, error) {
|
||||
|
||||
var matchingPods []*api.Pod
|
||||
matchingPods = make([]*api.Pod, 0)
|
||||
var err error
|
||||
// spec can have both PodSelector AND NamespaceSelector
|
||||
if peer.NamespaceSelector != nil {
|
||||
namespaceSelector, _ := v1.LabelSelectorAsSelector(peer.NamespaceSelector)
|
||||
namespaces, err := npc.ListNamespaceByLabels(namespaceSelector)
|
||||
if err != nil {
|
||||
return nil, errors.New("Failed to build network policies info due to " + err.Error())
|
||||
}
|
||||
|
||||
podSelector := labels.Everything()
|
||||
if peer.PodSelector != nil {
|
||||
podSelector, _ = v1.LabelSelectorAsSelector(peer.PodSelector)
|
||||
}
|
||||
for _, namespace := range namespaces {
|
||||
namespacePods, err := npc.ListPodsByNamespaceAndLabels(namespace.Name, podSelector)
|
||||
if err != nil {
|
||||
return nil, errors.New("Failed to build network policies info due to " + err.Error())
|
||||
}
|
||||
matchingPods = append(matchingPods, namespacePods...)
|
||||
}
|
||||
} else if peer.PodSelector != nil {
|
||||
podSelector, _ := v1.LabelSelectorAsSelector(peer.PodSelector)
|
||||
matchingPods, err = npc.ListPodsByNamespaceAndLabels(policy.Namespace, podSelector)
|
||||
}
|
||||
|
||||
return matchingPods, err
|
||||
}
|
||||
|
||||
func (npc *NetworkPolicyController) processNetworkPolicyPorts(npPorts []networking.NetworkPolicyPort, namedPort2eps namedPort2eps) (numericPorts []protocolAndPort, namedPorts []endPoints) {
|
||||
numericPorts, namedPorts = make([]protocolAndPort, 0), make([]endPoints, 0)
|
||||
for _, npPort := range npPorts {
|
||||
var protocol string
|
||||
if npPort.Protocol != nil {
|
||||
protocol = string(*npPort.Protocol)
|
||||
}
|
||||
if npPort.Port == nil {
|
||||
numericPorts = append(numericPorts, protocolAndPort{port: "", protocol: protocol})
|
||||
} else if npPort.Port.Type == intstr.Int {
|
||||
var portproto protocolAndPort
|
||||
if npPort.EndPort != nil {
|
||||
if *npPort.EndPort >= npPort.Port.IntVal {
|
||||
portproto.endport = strconv.Itoa(int(*npPort.EndPort))
|
||||
}
|
||||
}
|
||||
portproto.protocol, portproto.port = protocol, npPort.Port.String()
|
||||
numericPorts = append(numericPorts, portproto)
|
||||
} else {
|
||||
if protocol2eps, ok := namedPort2eps[npPort.Port.String()]; ok {
|
||||
if numericPort2eps, ok := protocol2eps[protocol]; ok {
|
||||
for _, eps := range numericPort2eps {
|
||||
namedPorts = append(namedPorts, *eps)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
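// Hedged sketch (not part of the original change) of how a numeric port range is
// carried through; the variable names are hypothetical and this assumes the usual
// core/v1 (api), networking/v1 and intstr imports used elsewhere in this file:
//
//	tcp := api.ProtocolTCP
//	port := intstr.FromInt(8080)
//	end := int32(9090)
//	numeric, named := npc.processNetworkPolicyPorts(
//		[]networking.NetworkPolicyPort{{Protocol: &tcp, Port: &port, EndPort: &end}}, namedPort2eps{})
//	// numeric[0] == protocolAndPort{protocol: "TCP", port: "8080", endport: "9090"}; named is empty.
//	// A string (named) port would instead be resolved against namedPort2eps into endPoints entries.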
|
||||
|
||||
func (npc *NetworkPolicyController) ListPodsByNamespaceAndLabels(namespace string, podSelector labels.Selector) (ret []*api.Pod, err error) {
|
||||
podLister := listers.NewPodLister(npc.podLister)
|
||||
allMatchedNameSpacePods, err := podLister.Pods(namespace).List(podSelector)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return allMatchedNameSpacePods, nil
|
||||
}
|
||||
|
||||
func (npc *NetworkPolicyController) ListNamespaceByLabels(namespaceSelector labels.Selector) ([]*api.Namespace, error) {
|
||||
namespaceLister := listers.NewNamespaceLister(npc.nsLister)
|
||||
matchedNamespaces, err := namespaceLister.List(namespaceSelector)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return matchedNamespaces, nil
|
||||
}
|
||||
|
||||
func (npc *NetworkPolicyController) evalIPBlockPeer(peer networking.NetworkPolicyPeer) [][]string {
|
||||
ipBlock := make([][]string, 0)
|
||||
if peer.PodSelector == nil && peer.NamespaceSelector == nil && peer.IPBlock != nil {
|
||||
if cidr := peer.IPBlock.CIDR; strings.HasSuffix(cidr, "/0") {
|
||||
ipBlock = append(ipBlock, []string{"0.0.0.0/1", utils.OptionTimeout, "0"}, []string{"128.0.0.0/1", utils.OptionTimeout, "0"})
|
||||
} else {
|
||||
ipBlock = append(ipBlock, []string{cidr, utils.OptionTimeout, "0"})
|
||||
}
|
||||
for _, except := range peer.IPBlock.Except {
|
||||
if strings.HasSuffix(except, "/0") {
|
||||
ipBlock = append(ipBlock, []string{"0.0.0.0/1", utils.OptionTimeout, "0", utils.OptionNoMatch}, []string{"128.0.0.0/1", utils.OptionTimeout, "0", utils.OptionNoMatch})
|
||||
} else {
|
||||
ipBlock = append(ipBlock, []string{except, utils.OptionTimeout, "0", utils.OptionNoMatch})
|
||||
}
|
||||
}
|
||||
}
|
||||
return ipBlock
|
||||
}
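// Hedged sketch (not part of the original change): for an ipBlock-only peer, a /0 CIDR
// is split into the two halves of the IPv4 space (ipset's hash:net cannot hold a
// zero-prefix network) and each "except" CIDR becomes a nomatch entry, e.g.
// CIDR "0.0.0.0/0" with Except ["10.0.0.0/8"] yields:
//
//	[["0.0.0.0/1" "timeout" "0"] ["128.0.0.0/1" "timeout" "0"] ["10.0.0.0/8" "timeout" "0" "nomatch"]]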
|
||||
|
||||
func (npc *NetworkPolicyController) grabNamedPortFromPod(pod *api.Pod, namedPort2eps *namedPort2eps) {
|
||||
if pod == nil || namedPort2eps == nil {
|
||||
return
|
||||
}
|
||||
for k := range pod.Spec.Containers {
|
||||
for _, port := range pod.Spec.Containers[k].Ports {
|
||||
name := port.Name
|
||||
protocol := string(port.Protocol)
|
||||
containerPort := strconv.Itoa(int(port.ContainerPort))
|
||||
|
||||
if (*namedPort2eps)[name] == nil {
|
||||
(*namedPort2eps)[name] = make(protocol2eps)
|
||||
}
|
||||
if (*namedPort2eps)[name][protocol] == nil {
|
||||
(*namedPort2eps)[name][protocol] = make(numericPort2eps)
|
||||
}
|
||||
if eps, ok := (*namedPort2eps)[name][protocol][containerPort]; !ok {
|
||||
(*namedPort2eps)[name][protocol][containerPort] = &endPoints{
|
||||
ips: []string{pod.Status.PodIP},
|
||||
protocolAndPort: protocolAndPort{port: containerPort, protocol: protocol},
|
||||
}
|
||||
} else {
|
||||
eps.ips = append(eps.ips, pod.Status.PodIP)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func networkPolicyChainName(namespace, policyName string, version string) string {
|
||||
hash := sha256.Sum256([]byte(namespace + policyName + version))
|
||||
encoded := base32.StdEncoding.EncodeToString(hash[:])
|
||||
return kubeNetworkPolicyChainPrefix + encoded[:16]
|
||||
}
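// Hedged sketch (not part of the original change): chain names are derived rather than
// stored, so the same namespace/name/version triple always maps to the same chain, e.g.
// (values hypothetical):
//
//	networkPolicyChainName("default", "allow-web", "1634000000000")
//	// => kubeNetworkPolicyChainPrefix + the first 16 characters of
//	//    base32(sha256("default" + "allow-web" + "1634000000000"))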
|
||||
|
||||
func policySourcePodIPSetName(namespace, policyName string) string {
|
||||
hash := sha256.Sum256([]byte(namespace + policyName))
|
||||
encoded := base32.StdEncoding.EncodeToString(hash[:])
|
||||
return kubeSourceIPSetPrefix + encoded[:16]
|
||||
}
|
||||
|
||||
func policyDestinationPodIPSetName(namespace, policyName string) string {
|
||||
hash := sha256.Sum256([]byte(namespace + policyName))
|
||||
encoded := base32.StdEncoding.EncodeToString(hash[:])
|
||||
return kubeDestinationIPSetPrefix + encoded[:16]
|
||||
}
|
||||
|
||||
func policyIndexedSourcePodIPSetName(namespace, policyName string, ingressRuleNo int) string {
|
||||
hash := sha256.Sum256([]byte(namespace + policyName + "ingressrule" + strconv.Itoa(ingressRuleNo) + "pod"))
|
||||
encoded := base32.StdEncoding.EncodeToString(hash[:])
|
||||
return kubeSourceIPSetPrefix + encoded[:16]
|
||||
}
|
||||
|
||||
func policyIndexedDestinationPodIPSetName(namespace, policyName string, egressRuleNo int) string {
|
||||
hash := sha256.Sum256([]byte(namespace + policyName + "egressrule" + strconv.Itoa(egressRuleNo) + "pod"))
|
||||
encoded := base32.StdEncoding.EncodeToString(hash[:])
|
||||
return kubeDestinationIPSetPrefix + encoded[:16]
|
||||
}
|
||||
|
||||
func policyIndexedSourceIPBlockIPSetName(namespace, policyName string, ingressRuleNo int) string {
|
||||
hash := sha256.Sum256([]byte(namespace + policyName + "ingressrule" + strconv.Itoa(ingressRuleNo) + "ipblock"))
|
||||
encoded := base32.StdEncoding.EncodeToString(hash[:])
|
||||
return kubeSourceIPSetPrefix + encoded[:16]
|
||||
}
|
||||
|
||||
func policyIndexedDestinationIPBlockIPSetName(namespace, policyName string, egressRuleNo int) string {
|
||||
hash := sha256.Sum256([]byte(namespace + policyName + "egressrule" + strconv.Itoa(egressRuleNo) + "ipblock"))
|
||||
encoded := base32.StdEncoding.EncodeToString(hash[:])
|
||||
return kubeDestinationIPSetPrefix + encoded[:16]
|
||||
}
|
||||
|
||||
func policyIndexedIngressNamedPortIPSetName(namespace, policyName string, ingressRuleNo, namedPortNo int) string {
|
||||
hash := sha256.Sum256([]byte(namespace + policyName + "ingressrule" + strconv.Itoa(ingressRuleNo) + strconv.Itoa(namedPortNo) + "namedport"))
|
||||
encoded := base32.StdEncoding.EncodeToString(hash[:])
|
||||
return kubeDestinationIPSetPrefix + encoded[:16]
|
||||
}
|
||||
|
||||
func policyIndexedEgressNamedPortIPSetName(namespace, policyName string, egressRuleNo, namedPortNo int) string {
|
||||
hash := sha256.Sum256([]byte(namespace + policyName + "egressrule" + strconv.Itoa(egressRuleNo) + strconv.Itoa(namedPortNo) + "namedport"))
|
||||
encoded := base32.StdEncoding.EncodeToString(hash[:])
|
||||
return kubeDestinationIPSetPrefix + encoded[:16]
|
||||
}
|
||||
|
||||
func policyRulePortsHasNamedPort(npPorts []networking.NetworkPolicyPort) bool {
|
||||
for _, npPort := range npPorts {
|
||||
if npPort.Port != nil && npPort.Port.Type == intstr.String {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
@ -1,66 +0,0 @@
|
||||
// Apache License v2.0 (copyright Cloud Native Labs & Rancher Labs)
|
||||
// - modified from https://github.com/cloudnativelabs/kube-router/blob/73b1b03b32c5755b240f6c077bb097abe3888314/pkg/controllers/netpol/utils.go
|
||||
|
||||
package netpol
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strconv"
|
||||
|
||||
api "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
PodCompleted api.PodPhase = "Completed"
|
||||
)
|
||||
|
||||
// isPodUpdateNetPolRelevant checks the attributes that we care about for building NetworkPolicies on the host and if it
|
||||
// finds a relevant change, it returns true; otherwise it returns false. The things we care about for NetworkPolicies are:
|
||||
// 1) Is the phase of the pod changing? (matters for catching completed, succeeded, or failed jobs)
|
||||
// 2) Is the pod IP changing? (changes how the network policy is applied to the host)
|
||||
// 3) Is the pod's host IP changing? (this should be caught by the check above with the CNI that kube-router runs with, but we check it as well for sanity)
|
||||
// 4) Is a pod's label changing? (potentially changes which NetworkPolicies select this pod)
|
||||
func isPodUpdateNetPolRelevant(oldPod, newPod *api.Pod) bool {
|
||||
return newPod.Status.Phase != oldPod.Status.Phase ||
|
||||
newPod.Status.PodIP != oldPod.Status.PodIP ||
|
||||
!reflect.DeepEqual(newPod.Status.PodIPs, oldPod.Status.PodIPs) ||
|
||||
newPod.Status.HostIP != oldPod.Status.HostIP ||
|
||||
!reflect.DeepEqual(newPod.Labels, oldPod.Labels)
|
||||
}
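// Hedged sketch (not part of the original change): an update that only bumps
// metadata.resourceVersion or container restart counts compares equal on all of the
// fields above and returns false, while e.g. a PodIP change from "10.42.0.5" to
// "10.42.0.9" returns true and marks the update as relevant to the policy controller.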
|
||||
|
||||
func isNetPolActionable(pod *api.Pod) bool {
|
||||
return !isFinished(pod) && pod.Status.PodIP != "" && !pod.Spec.HostNetwork
|
||||
}
|
||||
|
||||
func isFinished(pod *api.Pod) bool {
|
||||
switch pod.Status.Phase {
|
||||
case api.PodFailed, api.PodSucceeded, PodCompleted:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func validateNodePortRange(nodePortOption string) (string, error) {
|
||||
nodePortValidator := regexp.MustCompile(`^([0-9]+)[:-]([0-9]+)$`)
|
||||
if matched := nodePortValidator.MatchString(nodePortOption); !matched {
|
||||
return "", fmt.Errorf("failed to parse node port range given: '%s' please see specification in help text", nodePortOption)
|
||||
}
|
||||
matches := nodePortValidator.FindStringSubmatch(nodePortOption)
|
||||
if len(matches) != 3 {
|
||||
return "", fmt.Errorf("could not parse port number from range given: '%s'", nodePortOption)
|
||||
}
|
||||
port1, err := strconv.ParseUint(matches[1], 10, 16)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("could not parse first port number from range given: '%s'", nodePortOption)
|
||||
}
|
||||
port2, err := strconv.ParseUint(matches[2], 10, 16)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("could not parse second port number from range given: '%s'", nodePortOption)
|
||||
}
|
||||
if port1 >= port2 {
|
||||
return "", fmt.Errorf("port 1 is greater than or equal to port 2 in range given: '%s'", nodePortOption)
|
||||
}
|
||||
return fmt.Sprintf("%d:%d", port1, port2), nil
|
||||
}
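// Hedged usage sketch (not part of the original change), assuming a CLI-style port
// range value; both "-" and ":" separators are accepted and normalized to ":":
//
//	rng, err := validateNodePortRange("30000-32767")
//	// rng == "30000:32767", err == nil
//	_, err = validateNodePortRange("32767:30000")
//	// err != nil, because the first port must be strictly less than the second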
|
@ -1,593 +0,0 @@
|
||||
// Apache License v2.0 (copyright Cloud Native Labs & Rancher Labs)
|
||||
// - modified from https://github.com/cloudnativelabs/kube-router/blob/73b1b03b32c5755b240f6c077bb097abe3888314/pkg/utils/ipset.go
|
||||
|
||||
// +build !windows
|
||||
|
||||
package utils
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha1"
|
||||
"encoding/base32"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
// Error returned when ipset binary is not found.
|
||||
errIpsetNotFound = errors.New("ipset utility not found")
|
||||
)
|
||||
|
||||
const (
|
||||
// FamillyInet IPV4.
|
||||
FamillyInet = "inet"
|
||||
// FamillyInet6 IPV6.
|
||||
FamillyInet6 = "inet6"
|
||||
|
||||
// DefaultMaxElem Default OptionMaxElem value.
|
||||
DefaultMaxElem = "65536"
|
||||
// DefaultHasSize Default OptionHashSize value.
|
||||
DefaultHasSize = "1024"
|
||||
|
||||
// TypeHashIP The hash:ip set type uses a hash to store IP host addresses (default) or network addresses. Zero valued IP address cannot be stored in a hash:ip type of set.
|
||||
TypeHashIP = "hash:ip"
|
||||
// TypeHashMac The hash:mac set type uses a hash to store MAC addresses. Zero valued MAC addresses cannot be stored in a hash:mac type of set.
|
||||
TypeHashMac = "hash:mac"
|
||||
// TypeHashNet The hash:net set type uses a hash to store different sized IP network addresses. Network address with zero prefix size cannot be stored in this type of sets.
|
||||
TypeHashNet = "hash:net"
|
||||
// TypeHashNetNet The hash:net,net set type uses a hash to store pairs of different sized IP network addresses. Bear in mind that the first parameter has precedence over the second, so a nomatch entry could potentially be ineffective if a more specific first parameter existed with a suitable second parameter. Network address with zero prefix size cannot be stored in this type of set.
|
||||
TypeHashNetNet = "hash:net,net"
|
||||
// TypeHashIPPort The hash:ip,port set type uses a hash to store IP address and port number pairs. The port number is interpreted together with a protocol (default TCP) and zero protocol number cannot be used.
|
||||
TypeHashIPPort = "hash:ip,port"
|
||||
// TypeHashNetPort The hash:net,port set type uses a hash to store different sized IP network address and port pairs. The port number is interpreted together with a protocol (default TCP) and zero protocol number cannot be used. Network address with zero prefix size is not accepted either.
|
||||
TypeHashNetPort = "hash:net,port"
|
||||
// TypeHashIPPortIP The hash:ip,port,ip set type uses a hash to store IP address, port number and a second IP address triples. The port number is interpreted together with a protocol (default TCP) and zero protocol number cannot be used.
|
||||
TypeHashIPPortIP = "hash:ip,port,ip"
|
||||
// TypeHashIPPortNet The hash:ip,port,net set type uses a hash to store IP address, port number and IP network address triples. The port number is interpreted together with a protocol (default TCP) and zero protocol number cannot be used. Network address with zero prefix size cannot be stored either.
|
||||
TypeHashIPPortNet = "hash:ip,port,net"
|
||||
// TypeHashIPMark The hash:ip,mark set type uses a hash to store IP address and packet mark pairs.
|
||||
TypeHashIPMark = "hash:ip,mark"
|
||||
// TypeHashIPNetPortNet The hash:net,port,net set type behaves similarly to hash:ip,port,net but accepts a cidr value for both the first and last parameter. Either subnet is permitted to be a /0 should you wish to match port between all destinations.
|
||||
TypeHashIPNetPortNet = "hash:net,port,net"
|
||||
// TypeHashNetIface The hash:net,iface set type uses a hash to store different sized IP network address and interface name pairs.
|
||||
TypeHashNetIface = "hash:net,iface"
|
||||
// TypeListSet The list:set type uses a simple list in which you can store set names.
|
||||
TypeListSet = "list:set"
|
||||
|
||||
// OptionTimeout All set types support the optional timeout parameter when creating a set and adding entries. The value of the timeout parameter for the create command means the default timeout value (in seconds) for new entries. If a set is created with timeout support, then the same timeout option can be used to specify non-default timeout values when adding entries. A zero timeout value means the entry is added to the set permanently. The timeout value of already added elements can be changed by re-adding the element using the -exist option. When listing the set, the number of entries printed in the header might be larger than the listed number of entries for sets with the timeout extension: the number of entries in the set is updated when elements are added to or deleted from the set and periodically when the garbage collector evicts the timed out entries.
|
||||
OptionTimeout = "timeout"
|
||||
// OptionCounters All set types support the optional counters option when creating a set. If the option is specified then the set is created with packet and byte counters per element support. The packet and byte counters are initialized to zero when the elements are (re-)added to the set, unless the packet and byte counter values are explicitly specified by the packets and bytes options. An example when an element is added to a set with non-zero counter values.
|
||||
OptionCounters = "counters"
|
||||
// OptionPackets All set types support the optional counters option when creating a set. If the option is specified then the set is created with packet and byte counters per element support. The packet and byte counters are initialized to zero when the elements are (re-)added to the set, unless the packet and byte counter values are explicitly specified by the packets and bytes options. An example when an element is added to a set with non-zero counter values.
|
||||
OptionPackets = "packets"
|
||||
// OptionBytes All set types support the optional counters option when creating a set. If the option is specified then the set is created with packet and byte counters per element support. The packet and byte counters are initialized to zero when the elements are (re-)added to the set, unless the packet and byte counter values are explicitly specified by the packets and bytes options. An example when an element is added to a set with non-zero counter values.
|
||||
OptionBytes = "bytes"
|
||||
// OptionComment All set types support the optional comment extension. Enabling this extension on an ipset enables you to annotate an ipset entry with an arbitrary string. This string is completely ignored by both the kernel and ipset itself and is purely for providing a convenient means to document the reason for an entry's existence. Comments must not contain any quotation marks and the usual escape character (\) has no meaning
|
||||
OptionComment = "comment"
|
||||
// OptionSkbinfo All set types support the optional skbinfo extension. This extension allow to store the metainfo (firewall mark, tc class and hardware queue) with every entry and map it to packets by usage of SET netfilter target with --map-set option. skbmark option format: MARK or MARK/MASK, where MARK and MASK are 32bit hex numbers with 0x prefix. If only mark is specified mask 0xffffffff are used. skbprio option has tc class format: MAJOR:MINOR, where major and minor numbers are hex without 0x prefix. skbqueue option is just decimal number.
|
||||
OptionSkbinfo = "skbinfo"
|
||||
// OptionSkbmark All set types support the optional skbinfo extension. This extension allow to store the metainfo (firewall mark, tc class and hardware queue) with every entry and map it to packets by usage of SET netfilter target with --map-set option. skbmark option format: MARK or MARK/MASK, where MARK and MASK are 32bit hex numbers with 0x prefix. If only mark is specified mask 0xffffffff are used. skbprio option has tc class format: MAJOR:MINOR, where major and minor numbers are hex without 0x prefix. skbqueue option is just decimal number.
|
||||
OptionSkbmark = "skbmark"
|
||||
// OptionSkbprio All set types support the optional skbinfo extension. This extension allow to store the metainfo (firewall mark, tc class and hardware queue) with every entry and map it to packets by usage of SET netfilter target with --map-set option. skbmark option format: MARK or MARK/MASK, where MARK and MASK are 32bit hex numbers with 0x prefix. If only mark is specified mask 0xffffffff are used. skbprio option has tc class format: MAJOR:MINOR, where major and minor numbers are hex without 0x prefix. skbqueue option is just decimal number.
|
||||
OptionSkbprio = "skbprio"
|
||||
// OptionSkbqueue All set types support the optional skbinfo extension. This extension allow to store the metainfo (firewall mark, tc class and hardware queue) with every entry and map it to packets by usage of SET netfilter target with --map-set option. skbmark option format: MARK or MARK/MASK, where MARK and MASK are 32bit hex numbers with 0x prefix. If only mark is specified mask 0xffffffff are used. skbprio option has tc class format: MAJOR:MINOR, where major and minor numbers are hex without 0x prefix. skbqueue option is just decimal number.
|
||||
OptionSkbqueue = "skbqueue"
|
||||
// OptionHashSize This parameter is valid for the create command of all hash type sets. It defines the initial hash size for the set, default is 1024. The hash size must be a power of two, the kernel automatically rounds up non power of two hash sizes to the first correct value.
|
||||
OptionHashSize = "hashsize"
|
||||
// OptionMaxElem This parameter is valid for the create command of all hash type sets. It does define the maximal number of elements which can be stored in the set, default 65536.
|
||||
OptionMaxElem = "maxelem"
|
||||
// OptionFamilly This parameter is valid for the create command of all hash type sets except for hash:mac. It defines the protocol family of the IP addresses to be stored in the set. The default is inet, i.e IPv4.
|
||||
OptionFamilly = "family"
|
||||
// OptionNoMatch The hash set types which can store net type of data (i.e. hash:*net*) support the optional nomatch option when adding entries. When matching elements in the set, entries marked as nomatch are skipped as if those were not added to the set, which makes possible to build up sets with exceptions. See the example at hash type hash:net below. When elements are tested by ipset, the nomatch flags are taken into account. If one wants to test the existence of an element marked with nomatch in a set, then the flag must be specified too.
|
||||
OptionNoMatch = "nomatch"
|
||||
// OptionForceAdd All hash set types support the optional forceadd parameter when creating a set. When sets created with this option become full the next addition to the set may succeed and evict a random entry from the set.
|
||||
OptionForceAdd = "forceadd"
|
||||
|
||||
// tmpIPSetPrefix Is the prefix added to temporary ipset names used in the atomic swap operations during ipset restore. You should never see these on your system because they only exist during the restore.
|
||||
tmpIPSetPrefix = "TMP-"
|
||||
)
|
||||
|
||||
// IPSet represents a collection of ipset sets managed through the ipset binary.
|
||||
type IPSet struct {
|
||||
ipSetPath *string
|
||||
Sets map[string]*Set
|
||||
isIpv6 bool
|
||||
}
|
||||
|
||||
// Set represents an ipset set and its entries.
|
||||
type Set struct {
|
||||
Parent *IPSet
|
||||
Name string
|
||||
Entries []*Entry
|
||||
Options []string
|
||||
}
|
||||
|
||||
// Entry of ipset Set.
|
||||
type Entry struct {
|
||||
Set *Set
|
||||
Options []string
|
||||
}
|
||||
|
||||
// Get ipset binary path or return an error.
|
||||
func getIPSetPath() (*string, error) {
|
||||
path, err := exec.LookPath("ipset")
|
||||
if err != nil {
|
||||
return nil, errIpsetNotFound
|
||||
}
|
||||
return &path, nil
|
||||
}
|
||||
|
||||
// Used to run ipset binary with args and return stdout.
|
||||
func (ipset *IPSet) run(args ...string) (string, error) {
|
||||
var stderr bytes.Buffer
|
||||
var stdout bytes.Buffer
|
||||
cmd := exec.Cmd{
|
||||
Path: *ipset.ipSetPath,
|
||||
Args: append([]string{*ipset.ipSetPath}, args...),
|
||||
Stderr: &stderr,
|
||||
Stdout: &stdout,
|
||||
}
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
return "", errors.New(stderr.String())
|
||||
}
|
||||
|
||||
return stdout.String(), nil
|
||||
}
|
||||
|
||||
// Used to run the ipset binary with args, inject a stdin buffer, and return stdout.
|
||||
func (ipset *IPSet) runWithStdin(stdin *bytes.Buffer, args ...string) (string, error) {
|
||||
var stderr bytes.Buffer
|
||||
var stdout bytes.Buffer
|
||||
cmd := exec.Cmd{
|
||||
Path: *ipset.ipSetPath,
|
||||
Args: append([]string{*ipset.ipSetPath}, args...),
|
||||
Stderr: &stderr,
|
||||
Stdout: &stdout,
|
||||
Stdin: stdin,
|
||||
}
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
return "", errors.New(stderr.String())
|
||||
}
|
||||
|
||||
return stdout.String(), nil
|
||||
}
|
||||
|
||||
// NewIPSet create a new IPSet with ipSetPath initialized.
|
||||
func NewIPSet(isIpv6 bool) (*IPSet, error) {
|
||||
ipSetPath, err := getIPSetPath()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ipSet := &IPSet{
|
||||
ipSetPath: ipSetPath,
|
||||
Sets: make(map[string]*Set),
|
||||
isIpv6: isIpv6,
|
||||
}
|
||||
return ipSet, nil
|
||||
}
|
||||
|
||||
// Create a set identified with setname and specified type. The type may
|
||||
// require type specific options. Does not create set on the system if it
|
||||
// already exists by the same name.
|
||||
func (ipset *IPSet) Create(setName string, createOptions ...string) (*Set, error) {
|
||||
// Populate Set map if needed
|
||||
if ipset.Get(setName) == nil {
|
||||
ipset.Sets[setName] = &Set{
|
||||
Name: setName,
|
||||
Options: createOptions,
|
||||
Parent: ipset,
|
||||
}
|
||||
}
|
||||
|
||||
// Determine if set with the same name is already active on the system
|
||||
setIsActive, err := ipset.Sets[setName].IsActive()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to determine if ipset set %s exists: %s",
|
||||
setName, err)
|
||||
}
|
||||
|
||||
// Create set if missing from the system
|
||||
if !setIsActive {
|
||||
if ipset.isIpv6 {
|
||||
// Add "family inet6" option and a "inet6:" prefix for IPv6 sets.
|
||||
args := []string{"create", "-exist", ipset.Sets[setName].name()}
|
||||
args = append(args, createOptions...)
|
||||
args = append(args, "family", "inet6")
|
||||
if _, err := ipset.run(args...); err != nil {
|
||||
return nil, fmt.Errorf("failed to create ipset set on system: %s", err)
|
||||
}
|
||||
} else {
|
||||
_, err := ipset.run(append([]string{"create", "-exist", setName},
|
||||
createOptions...)...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create ipset set on system: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return ipset.Sets[setName], nil
|
||||
}
|
||||
|
||||
// Add a given Set to an IPSet
|
||||
func (ipset *IPSet) Add(set *Set) error {
|
||||
_, err := ipset.Create(set.Name, set.Options...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
options := make([][]string, len(set.Entries))
|
||||
for index, entry := range set.Entries {
|
||||
options[index] = entry.Options
|
||||
}
|
||||
|
||||
err = ipset.Get(set.Name).BatchAdd(options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RefreshSet adds/updates the internal Sets map with a set of entries but does not run the restore command
|
||||
func (ipset *IPSet) RefreshSet(setName string, entriesWithOptions [][]string, setType string) {
|
||||
if ipset.Get(setName) == nil {
|
||||
ipset.Sets[setName] = &Set{
|
||||
Name: setName,
|
||||
Options: []string{setType, OptionTimeout, "0"},
|
||||
Parent: ipset,
|
||||
}
|
||||
}
|
||||
entries := make([]*Entry, len(entriesWithOptions))
|
||||
for i, entry := range entriesWithOptions {
|
||||
entries[i] = &Entry{Set: ipset.Sets[setName], Options: entry}
|
||||
}
|
||||
ipset.Get(setName).Entries = entries
|
||||
}
|
||||
|
||||
// Add a given entry to the set. If the -exist option is specified, ipset
|
||||
// ignores the add if the entry is already present in the set.
|
||||
// Note: if you need to add multiple entries (e.g., in a loop), use BatchAdd instead,
|
||||
// as it’s much more performant.
|
||||
func (set *Set) Add(addOptions ...string) (*Entry, error) {
|
||||
entry := &Entry{
|
||||
Set: set,
|
||||
Options: addOptions,
|
||||
}
|
||||
set.Entries = append(set.Entries, entry)
|
||||
_, err := set.Parent.run(append([]string{"add", "-exist", entry.Set.name()}, addOptions...)...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return entry, nil
|
||||
}
|
||||
|
||||
// BatchAdd given entries (with their options) to the set.
|
||||
// For multiple items, this is much faster than Add().
|
||||
func (set *Set) BatchAdd(addOptions [][]string) error {
|
||||
newEntries := make([]*Entry, len(addOptions))
|
||||
for index, options := range addOptions {
|
||||
entry := &Entry{
|
||||
Set: set,
|
||||
Options: options,
|
||||
}
|
||||
newEntries[index] = entry
|
||||
}
|
||||
set.Entries = append(set.Entries, newEntries...)
|
||||
|
||||
// Build the `restore` command contents
|
||||
var builder strings.Builder
|
||||
for _, options := range addOptions {
|
||||
line := strings.Join(append([]string{"add", "-exist", set.name()}, options...), " ")
|
||||
builder.WriteString(line + "\n")
|
||||
}
|
||||
restoreContents := builder.String()
|
||||
|
||||
// Invoke the command
|
||||
_, err := set.Parent.runWithStdin(bytes.NewBufferString(restoreContents), "restore")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
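// Hedged sketch (not part of the original change): for a hypothetical IPv4 set "foo"
// with two entries, the text piped into `ipset restore` by BatchAdd is:
//
//	add -exist foo 1.2.3.4
//	add -exist foo 5.6.7.8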
|
||||
|
||||
// Del an entry from a set. If the -exist option is specified and the entry is
|
||||
// not in the set (maybe already expired), then the command is ignored.
|
||||
func (entry *Entry) Del() error {
|
||||
_, err := entry.Set.Parent.run(append([]string{"del", entry.Set.name()}, entry.Options...)...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = entry.Set.Parent.Save()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Test whether an entry is in a set or not. Exit status number is zero if the
|
||||
// tested entry is in the set and nonzero if it is missing from the set.
|
||||
func (set *Set) Test(testOptions ...string) (bool, error) {
|
||||
_, err := set.Parent.run(append([]string{"test", set.name()}, testOptions...)...)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// Destroy the specified set, or all sets if none is given. If the set still has
|
||||
// references, nothing is done and no set is destroyed.
|
||||
func (set *Set) Destroy() error {
|
||||
_, err := set.Parent.run("destroy", set.name())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
delete(set.Parent.Sets, set.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Destroy the specified set by name. If the set still has references, nothing
|
||||
// is done and no set is destroyed. If the IPSet does not contain the named set
|
||||
// then Destroy is a no-op.
|
||||
func (ipset *IPSet) Destroy(setName string) error {
|
||||
set := ipset.Get(setName)
|
||||
if set == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
err := set.Destroy()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DestroyAllWithin destroys all sets contained within the IPSet's Sets.
|
||||
func (ipset *IPSet) DestroyAllWithin() error {
|
||||
for _, v := range ipset.Sets {
|
||||
err := v.Destroy()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsActive checks if a set exists on the system with the same name.
|
||||
func (set *Set) IsActive() (bool, error) {
|
||||
_, err := set.Parent.run("list", set.name())
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "name does not exist") {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (set *Set) name() string {
|
||||
if set.Parent.isIpv6 {
|
||||
return "inet6:" + set.Name
|
||||
}
|
||||
return set.Name
|
||||
}
|
||||
|
||||
// Parse ipset save stdout.
|
||||
// ex:
|
||||
// create KUBE-DST-3YNVZWWGX3UQQ4VQ hash:ip family inet hashsize 1024 maxelem 65536 timeout 0
|
||||
// add KUBE-DST-3YNVZWWGX3UQQ4VQ 100.96.1.6 timeout 0
|
||||
func parseIPSetSave(ipset *IPSet, result string) map[string]*Set {
|
||||
sets := make(map[string]*Set)
|
||||
// Save is always in order
|
||||
lines := strings.Split(result, "\n")
|
||||
for _, line := range lines {
|
||||
content := strings.Split(line, " ")
|
||||
if content[0] == "create" {
|
||||
sets[content[1]] = &Set{
|
||||
Parent: ipset,
|
||||
Name: content[1],
|
||||
Options: content[2:],
|
||||
}
|
||||
} else if content[0] == "add" {
|
||||
set := sets[content[1]]
|
||||
set.Entries = append(set.Entries, &Entry{
|
||||
Set: set,
|
||||
Options: content[2:],
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return sets
|
||||
}
|
||||
|
||||
// Build ipset restore input
|
||||
// ex:
|
||||
// create KUBE-DST-3YNVZWWGX3UQQ4VQ hash:ip family inet hashsize 1024 maxelem 65536 timeout 0
|
||||
// add KUBE-DST-3YNVZWWGX3UQQ4VQ 100.96.1.6 timeout 0
|
||||
func buildIPSetRestore(ipset *IPSet) string {
|
||||
setNames := make([]string, 0, len(ipset.Sets))
|
||||
for setName := range ipset.Sets {
|
||||
// we need setNames in a consistent order so that we can unit-test that this method has predictable output:
|
||||
setNames = append(setNames, setName)
|
||||
}
|
||||
|
||||
sort.Strings(setNames)
|
||||
|
||||
tmpSets := map[string]string{}
|
||||
ipSetRestore := &strings.Builder{}
|
||||
for _, setName := range setNames {
|
||||
set := ipset.Sets[setName]
|
||||
setOptions := strings.Join(set.Options, " ")
|
||||
|
||||
tmpSetName := tmpSets[setOptions]
|
||||
if tmpSetName == "" {
|
||||
// create a temporary set per unique set-options:
|
||||
hash := sha1.Sum([]byte("tmp:" + setOptions))
|
||||
tmpSetName = tmpIPSetPrefix + base32.StdEncoding.EncodeToString(hash[:10])
|
||||
ipSetRestore.WriteString(fmt.Sprintf("create %s %s\n", tmpSetName, setOptions))
|
||||
// just in case we are starting up after a crash, we should flush the TMP ipset to be safe if it
|
||||
// already existed, so we do not pollute other ipsets:
|
||||
ipSetRestore.WriteString(fmt.Sprintf("flush %s\n", tmpSetName))
|
||||
tmpSets[setOptions] = tmpSetName
|
||||
}
|
||||
|
||||
for _, entry := range set.Entries {
|
||||
// add entries to the tmp set:
|
||||
ipSetRestore.WriteString(fmt.Sprintf("add %s %s\n", tmpSetName, strings.Join(entry.Options, " ")))
|
||||
}
|
||||
|
||||
// now create the actual ipset (this is a no-op if it already exists, because restore runs with -exist):
|
||||
ipSetRestore.WriteString(fmt.Sprintf("create %s %s\n", set.Name, setOptions))
|
||||
|
||||
// now that both exist, we can swap them:
|
||||
ipSetRestore.WriteString(fmt.Sprintf("swap %s %s\n", tmpSetName, set.Name))
|
||||
|
||||
// empty the tmp set (which is actually the old one now):
|
||||
ipSetRestore.WriteString(fmt.Sprintf("flush %s\n", tmpSetName))
|
||||
}
|
||||
|
||||
setsToDestroy := make([]string, 0, len(tmpSets))
|
||||
for _, tmpSetName := range tmpSets {
|
||||
setsToDestroy = append(setsToDestroy, tmpSetName)
|
||||
}
|
||||
// need to destroy the sets in a predictable order for unit test!
|
||||
sort.Strings(setsToDestroy)
|
||||
for _, tmpSetName := range setsToDestroy {
|
||||
// finally, destroy the tmp sets.
|
||||
ipSetRestore.WriteString(fmt.Sprintf("destroy %s\n", tmpSetName))
|
||||
}
|
||||
|
||||
return ipSetRestore.String()
|
||||
}
|
||||
|
||||
// Save the given set, or all sets if none is given to stdout in a format that
|
||||
// restore can read. The option -file can be used to specify a filename instead
|
||||
// of stdout.
|
||||
// save "ipset save" command output to ipset.sets.
|
||||
func (ipset *IPSet) Save() error {
|
||||
stdout, err := ipset.run("save")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ipset.Sets = parseIPSetSave(ipset, stdout)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Restore a saved session generated by save. The saved session can be fed from
|
||||
// stdin or the option -file can be used to specify a filename instead of
|
||||
// stdin. Please note, existing sets and elements are not erased by restore
|
||||
// unless specified so in the restore file. All commands are allowed in restore
|
||||
// mode except list, help, version, interactive mode and restore itself.
|
||||
// Send formatted ipset.sets into stdin of "ipset restore" command.
|
||||
func (ipset *IPSet) Restore() error {
|
||||
stdin := bytes.NewBufferString(buildIPSetRestore(ipset))
|
||||
_, err := ipset.runWithStdin(stdin, "restore", "-exist")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Flush all entries from the specified set or flush all sets if none is given.
|
||||
func (set *Set) Flush() error {
|
||||
_, err := set.Parent.run("flush", set.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Flush all entries from the specified set or flush all sets if none is given.
|
||||
func (ipset *IPSet) Flush() error {
|
||||
_, err := ipset.run("flush")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get Set by Name.
|
||||
func (ipset *IPSet) Get(setName string) *Set {
|
||||
set, ok := ipset.Sets[setName]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
return set
|
||||
}
|
||||
|
||||
// Rename a set. Set identified by SETNAME-TO must not exist.
|
||||
func (set *Set) Rename(newName string) error {
|
||||
if set.Parent.isIpv6 {
|
||||
newName = "ipv6:" + newName
|
||||
}
|
||||
_, err := set.Parent.run("rename", set.name(), newName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Swap the content of two sets, or in other words, exchange the names of two
|
||||
// sets. The referred sets must exist, and only sets of compatible types can be
|
||||
// swapped.
|
||||
func (set *Set) Swap(setTo *Set) error {
|
||||
_, err := set.Parent.run("swap", set.name(), setTo.name())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Refresh a Set with new entries.
|
||||
func (set *Set) Refresh(entries []string, extraOptions ...string) error {
|
||||
entriesWithOptions := make([][]string, len(entries))
|
||||
|
||||
for index, entry := range entries {
|
||||
entriesWithOptions[index] = append([]string{entry}, extraOptions...)
|
||||
}
|
||||
|
||||
return set.RefreshWithBuiltinOptions(entriesWithOptions)
|
||||
}
|
||||
|
||||
// RefreshWithBuiltinOptions refreshes a Set with new entries that carry built-in options.
|
||||
func (set *Set) RefreshWithBuiltinOptions(entries [][]string) error {
|
||||
var err error
|
||||
|
||||
// The set-name must be < 32 characters!
|
||||
tempName := set.Name + "-"
|
||||
|
||||
newSet := &Set{
|
||||
Parent: set.Parent,
|
||||
Name: tempName,
|
||||
Options: set.Options,
|
||||
}
|
||||
|
||||
err = set.Parent.Add(newSet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = newSet.BatchAdd(entries)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = set.Swap(newSet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = set.Parent.Destroy(tempName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
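// Hedged usage sketch (not part of the original change): Refresh replaces a set's
// membership by filling a temporary "<name>-" set, swapping it in, and destroying the
// temporary set; the set name below is hypothetical:
//
//	if set := ipset.Get("KUBE-DST-EXAMPLE"); set != nil {
//		_ = set.Refresh([]string{"10.42.0.5", "10.42.0.6"}, OptionTimeout, "0")
//	}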
|
@ -1,77 +0,0 @@
|
||||
// Apache License v2.0 (copyright Cloud Native Labs & Rancher Labs)
|
||||
// - modified from https://github.com/cloudnativelabs/kube-router/blob/73b1b03b32c5755b240f6c077bb097abe3888314/pkg/utils/ipset_test.go
|
||||
|
||||
package utils
|
||||
|
||||
import "testing"
|
||||
|
||||
func Test_UnitbuildIPSetRestore(t *testing.T) {
|
||||
type args struct {
|
||||
ipset *IPSet
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "simple-restore",
|
||||
args: args{
|
||||
ipset: &IPSet{Sets: map[string]*Set{
|
||||
"foo": {
|
||||
Name: "foo",
|
||||
Options: []string{"hash:ip", "yolo", "things", "12345"},
|
||||
Entries: []*Entry{
|
||||
{Options: []string{"1.2.3.4"}},
|
||||
},
|
||||
},
|
||||
"google-dns-servers": {
|
||||
Name: "google-dns-servers",
|
||||
Options: []string{"hash:ip", "lol"},
|
||||
Entries: []*Entry{
|
||||
{Options: []string{"4.4.4.4"}},
|
||||
{Options: []string{"8.8.8.8"}},
|
||||
},
|
||||
},
|
||||
// this one and the one above share the same exact options -- and therefore will reuse the same
|
||||
// tmp ipset:
|
||||
"more-ip-addresses": {
|
||||
Name: "google-dns-servers",
|
||||
Options: []string{"hash:ip", "lol"},
|
||||
Entries: []*Entry{
|
||||
{Options: []string{"5.5.5.5"}},
|
||||
{Options: []string{"6.6.6.6"}},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
want: "create TMP-7NOTZDOMLXBX6DAJ hash:ip yolo things 12345\n" +
|
||||
"flush TMP-7NOTZDOMLXBX6DAJ\n" +
|
||||
"add TMP-7NOTZDOMLXBX6DAJ 1.2.3.4\n" +
|
||||
"create foo hash:ip yolo things 12345\n" +
|
||||
"swap TMP-7NOTZDOMLXBX6DAJ foo\n" +
|
||||
"flush TMP-7NOTZDOMLXBX6DAJ\n" +
|
||||
"create TMP-XD7BSSQZELS7TP35 hash:ip lol\n" +
|
||||
"flush TMP-XD7BSSQZELS7TP35\n" +
|
||||
"add TMP-XD7BSSQZELS7TP35 4.4.4.4\n" +
|
||||
"add TMP-XD7BSSQZELS7TP35 8.8.8.8\n" +
|
||||
"create google-dns-servers hash:ip lol\n" +
|
||||
"swap TMP-XD7BSSQZELS7TP35 google-dns-servers\n" +
|
||||
"flush TMP-XD7BSSQZELS7TP35\n" +
|
||||
"add TMP-XD7BSSQZELS7TP35 5.5.5.5\n" +
|
||||
"add TMP-XD7BSSQZELS7TP35 6.6.6.6\n" +
|
||||
"create google-dns-servers hash:ip lol\n" +
|
||||
"swap TMP-XD7BSSQZELS7TP35 google-dns-servers\n" +
|
||||
"flush TMP-XD7BSSQZELS7TP35\n" +
|
||||
"destroy TMP-7NOTZDOMLXBX6DAJ\n" +
|
||||
"destroy TMP-XD7BSSQZELS7TP35\n",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := buildIPSetRestore(tt.args.ipset); got != tt.want {
|
||||
t.Errorf("buildIPSetRestore() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
@ -1,75 +0,0 @@
|
||||
// Apache License v2.0 (copyright Cloud Native Labs & Rancher Labs)
|
||||
// - modified from https://github.com/cloudnativelabs/kube-router/blob/73b1b03b32c5755b240f6c077bb097abe3888314/pkg/utils/iptables.go
|
||||
|
||||
package utils
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var hasWait bool
|
||||
|
||||
func init() {
|
||||
path, err := exec.LookPath("iptables-restore")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
args := []string{"iptables-restore", "--help"}
|
||||
cmd := exec.Cmd{
|
||||
Path: path,
|
||||
Args: args,
|
||||
}
|
||||
cmdOutput, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
hasWait = strings.Contains(string(cmdOutput), "wait")
|
||||
}
|
||||
|
||||
// SaveInto calls `iptables-save` for given table and stores result in a given buffer.
|
||||
func SaveInto(table string, buffer *bytes.Buffer) error {
|
||||
path, err := exec.LookPath("iptables-save")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stderrBuffer := bytes.NewBuffer(nil)
|
||||
args := []string{"iptables-save", "-t", table}
|
||||
cmd := exec.Cmd{
|
||||
Path: path,
|
||||
Args: args,
|
||||
Stdout: buffer,
|
||||
Stderr: stderrBuffer,
|
||||
}
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("%v (%s)", err, stderrBuffer)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Restore runs `iptables-restore` passing data through []byte.
|
||||
func Restore(table string, data []byte) error {
|
||||
path, err := exec.LookPath("iptables-restore")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var args []string
|
||||
if hasWait {
|
||||
args = []string{"iptables-restore", "--wait", "-T", table}
|
||||
} else {
|
||||
args = []string{"iptables-restore", "-T", table}
|
||||
}
|
||||
cmd := exec.Cmd{
|
||||
Path: path,
|
||||
Args: args,
|
||||
Stdin: bytes.NewBuffer(data),
|
||||
}
|
||||
b, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("%v (%s)", err, b)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
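// Hedged usage sketch (not part of the original change): a read-modify-write pass over
// the filter table; any added rules must be spliced in before the final COMMIT line of
// the iptables-save output:
//
//	var buf bytes.Buffer
//	if err := SaveInto("filter", &buf); err != nil {
//		return err
//	}
//	// ... edit the saved rules in buf ...
//	if err := Restore("filter", buf.Bytes()); err != nil {
//		return err
//	}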
|
@ -1,66 +0,0 @@
|
||||
// Apache License v2.0 (copyright Cloud Native Labs & Rancher Labs)
|
||||
// - modified from https://github.com/cloudnativelabs/kube-router/blob/73b1b03b32c5755b240f6c077bb097abe3888314/pkg/utils/node.go
|
||||
|
||||
// +build !windows
|
||||
|
||||
package utils
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
)
|
||||
|
||||
// GetNodeObject returns the node API object for the node
|
||||
func GetNodeObject(clientset kubernetes.Interface, hostnameOverride string) (*apiv1.Node, error) {
|
||||
// assuming kube-router is running as a pod, first check the NODE_NAME environment variable
|
||||
nodeName := os.Getenv("NODE_NAME")
|
||||
if nodeName != "" {
|
||||
node, err := clientset.CoreV1().Nodes().Get(context.Background(), nodeName, metav1.GetOptions{})
|
||||
if err == nil {
|
||||
return node, nil
|
||||
}
|
||||
}
|
||||
|
||||
// if env NODE_NAME is not set, then check if the node is registered with its hostname
|
||||
hostName, _ := os.Hostname()
|
||||
node, err := clientset.CoreV1().Nodes().Get(context.Background(), hostName, metav1.GetOptions{})
|
||||
if err == nil {
|
||||
return node, nil
|
||||
}
|
||||
|
||||
// if env NODE_NAME is not set and the node is not registered with its hostname, then use the hostname override
|
||||
if hostnameOverride != "" {
|
||||
node, err = clientset.CoreV1().Nodes().Get(context.Background(), hostnameOverride, metav1.GetOptions{})
|
||||
if err == nil {
|
||||
return node, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("failed to identify the node by NODE_NAME, hostname or --hostname-override")
|
||||
}
|
||||
|
||||
// GetNodeIP returns the most valid external facing IP address for a node.
|
||||
// Order of preference:
|
||||
// 1. NodeInternalIP
|
||||
// 2. NodeExternalIP (Only set on cloud providers usually)
|
||||
func GetNodeIP(node *apiv1.Node) (net.IP, error) {
|
||||
addresses := node.Status.Addresses
|
||||
addressMap := make(map[apiv1.NodeAddressType][]apiv1.NodeAddress)
|
||||
for i := range addresses {
|
||||
addressMap[addresses[i].Type] = append(addressMap[addresses[i].Type], addresses[i])
|
||||
}
|
||||
if addresses, ok := addressMap[apiv1.NodeInternalIP]; ok {
|
||||
return net.ParseIP(addresses[0].Address), nil
|
||||
}
|
||||
if addresses, ok := addressMap[apiv1.NodeExternalIP]; ok {
|
||||
return net.ParseIP(addresses[0].Address), nil
|
||||
}
|
||||
return nil, errors.New("host IP unknown")
|
||||
}
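// Hedged sketch (not part of the original change): for a node whose status reports
// an InternalIP of 10.0.0.4 and an ExternalIP of 203.0.113.7, GetNodeIP returns
// 10.0.0.4; the external address is used only when no InternalIP is present.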
|