update k8s to 1.18.4 (#1945)

Hussein Galal authored 2020-06-24 00:12:14 +02:00, committed by GitHub
parent 198a83a1fc
commit 7f9bf0b6d4
34 changed files with 429 additions and 280 deletions

go.mod (50 changed lines)

@@ -32,31 +32,31 @@ replace (
github.com/prometheus/client_model => github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910
github.com/prometheus/common => github.com/prometheus/common v0.0.0-20181126121408-4724e9255275
github.com/prometheus/procfs => github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a
k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.3-k3s1
k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.3-k3s1
k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.3-k3s1
k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.3-k3s1
k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.3-k3s1
k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.3-k3s1
k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.3-k3s1
k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.3-k3s1
k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.3-k3s1
k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.3-k3s1
k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.3-k3s1
k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.3-k3s1
k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.3-k3s1
k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.3-k3s1
k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.3-k3s1
k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.3-k3s1
k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.3-k3s1
k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.3-k3s1
k8s.io/kubernetes => github.com/rancher/kubernetes v1.18.3-k3s1
k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.3-k3s1
k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.3-k3s1
k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.18.3-k3s1
k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.3-k3s1
k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.18.3-k3s1
k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.18.3-k3s1
k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.4-k3s1
k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.4-k3s1
k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.4-k3s1
k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.4-k3s1
k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.4-k3s1
k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.4-k3s1
k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.4-k3s1
k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.4-k3s1
k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.4-k3s1
k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.4-k3s1
k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.4-k3s1
k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.4-k3s1
k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.4-k3s1
k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.4-k3s1
k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.4-k3s1
k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.4-k3s1
k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.4-k3s1
k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.4-k3s1
k8s.io/kubernetes => github.com/rancher/kubernetes v1.18.4-k3s1
k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.4-k3s1
k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.4-k3s1
k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.18.4-k3s1
k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.4-k3s1
k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.18.4-k3s1
k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.18.4-k3s1
mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34
)

go.sum (86 changed lines)

@@ -630,49 +630,49 @@ github.com/rancher/helm-controller v0.6.3 h1:tsqSXndQ7Ms8S0mBd2zWOcTV1tiZ4BUYkem
github.com/rancher/helm-controller v0.6.3/go.mod h1:ZylsxIMGNADRPRNW+NiBWhrwwks9vnKLQiCHYWb6Bi0=
github.com/rancher/kine v0.4.0 h1:1IhWy3TzjExG8xnj46eyUEWdzqNAD1WrgL4eEBKm6Uc=
github.com/rancher/kine v0.4.0/go.mod h1:IImtCJ68AIkE+VY/kUI0NkyJL5q5WzO8QvMsSXqbrpA=
github.com/rancher/kubernetes v1.18.3-k3s1 h1:QYh2MY+odOzBQedwClFdX1tRtYQADaFRWi+etbBJvuU=
github.com/rancher/kubernetes v1.18.3-k3s1/go.mod h1:Efg82S+Ti02A/Mww53bxroc7IgzX2bgPsf6hT8gAs3M=
github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.3-k3s1 h1:wNUROW7IOAbW0pCNtdKvDWIRUKE5pk3jffttSMyGn8s=
github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.3-k3s1/go.mod h1:oMzWB6/RPBLYAObltLVSu5Ms1ZztBe7G8s1ni2rZY7w=
github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.3-k3s1 h1:arrGaUaK4WJ/x0TMSw7wxMDlK+akOfq2Yk4MjMdnkqQ=
github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.3-k3s1/go.mod h1:BVIYewlEVCukQBRrZR3Kms8GdCsDQBsRIBCoy3rwzMk=
github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.3-k3s1 h1:3rwFiKyQ7FL2hdqQNdJnP0BaM0a8L1tmGf+h1Nrqeno=
github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.3-k3s1/go.mod h1:0LbhSvBf6oDO/G0IsPYTC3eGykX9kRjGqE1+90am7Pg=
github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.3-k3s1 h1:Ai2duIKWpX9IxW/sQL1ojbAX9KcJ9TPLQsogR9vafJA=
github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.3-k3s1/go.mod h1:wYoVKxMBc/Gtl3o5eEhoIy1iS0Zw8kLYIak9mud65gg=
github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.3-k3s1 h1:TQrvULLk+ESOptqV09QK6zzPu/IV7kJSxLTa9c5OTbE=
github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.3-k3s1/go.mod h1:e0a+/gPy7PnNaRJHZz5E3lqfMsiJ17sSfvktHyipb3I=
github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.3-k3s1 h1:P3kIcwlm5w/XW8HgpyOYxOm70ZfZEtZm3xpHuOnlx6M=
github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.3-k3s1/go.mod h1:Ck7kQmlFASfY0SaqYH1NwUrxeuAipkIbnuHi642eQ+I=
github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.3-k3s1 h1:czS0txmHj7i7dRqppu6ekwFigMsZUHMMmaSySuRcQSE=
github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.3-k3s1/go.mod h1:jW0IWD1v1cNcp/vvXbVuovmZNSieKSZBdM7VmX1lrVI=
github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.3-k3s1 h1:wjE5mqAyInJ33cx0St7jYWdy97O5hRBlxysQCH7kvU4=
github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.3-k3s1/go.mod h1:oHXhD/NqW/vlYggpTUWbP2x6disww69H1jdsyirbJl8=
github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.3-k3s1 h1:SBCvaudegFLUmSl0rfariVQjGhsH/A0AV2rq8ys3IU4=
github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.3-k3s1/go.mod h1:qBtAbyavqI3lGwEvxrQk9wwUTWntOADx38Iizyn31nw=
github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.3-k3s1 h1:/47POpHTRsfFNc0k2Ruq67vqOtPms5FA9TXo9ci6FZc=
github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.3-k3s1/go.mod h1:zRlCznOsLYdwq5DB2b/26X/n/04fhV3U3rMC60t80/Q=
github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.3-k3s1 h1:/VofAOz4+KX9zda4+M8WnE5eDn82ezYyBz7HuzUoBq0=
github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.3-k3s1/go.mod h1:O3AtmT8iqccYwp/fsXdy3h0N9X/yfvRMD2XS48PJrBk=
github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.3-k3s1 h1:+CsRXq96B0ThQuI0x0i975CBcRKYfrUlR4/s+h3vYxU=
github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.3-k3s1/go.mod h1:/YQL/PqGdoNbC2H+w4tx2zrVdxNb541lW3PA81FdOlE=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.3-k3s1 h1:qDmjyk7BoZO7TcGpU6YKOZVPn84628tC8s0r8Xz/6M0=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.3-k3s1/go.mod h1:NcOKzNVVRhmkQmzCcBHfPPcZqgGXouc/o3Eul3saPj8=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.3-k3s1 h1:Y9ySTyuhR84dJaYzwBHmKxBtHQ2uWIoP9VL4iYCvwUM=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.3-k3s1/go.mod h1:pABoR/v0r2aJLFC1570FaaRJbXyiHhqdGHe5W8nk0XY=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.3-k3s1 h1:K1sU4yqMdWaCGj4jEusK+PFowCCUuhTiH3jUriQD5WU=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.3-k3s1/go.mod h1:GLAmLACy/nOND24DRGKyPH21F89pTcevjPRxEtbLJmU=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.3-k3s1 h1:NqvNsqKpXJ7ZAFLOmxj6gvRktx0Oga9DH0M979za6uI=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.3-k3s1/go.mod h1:UNQ/Ff/Mq9mmCl0MYGl3ciCEIRQr9BT+/DSsoy6/ZMI=
github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.3-k3s1 h1:j4l5XcOKNqDtqIMrdDJLQtO+4LAcaojGa/etG6lv354=
github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.3-k3s1/go.mod h1:eosbAJR16uuWsgirnmlt31NV+ZwZLQsMNbxiRZYbco8=
github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.3-k3s1 h1:qqKfrPB2ghGqf/ElQrVmZaVvm+/DSupWAApEe4Zk5Uk=
github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.3-k3s1/go.mod h1:Raj75cxSm9NiVBoLk/lB1D4XvpBzTG4WoJ6nIH8Cyew=
github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.3-k3s1 h1:xb3ZtwF3emE38qi8XWjEw+ES4WKe3k4B4Sr8YGFWEHo=
github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.3-k3s1/go.mod h1:R6lK1g14jiec20OVuA1ArvsCKs5th4rxGL3eUMdQmyA=
github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.3-k3s1 h1:r7qvKjbV7XHI3W9a8Jhzsiujrp7d76USez5i1LZNfYc=
github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.3-k3s1/go.mod h1:xZM9EdJpWjqIWPvLiCP7vYKUEMwIgc0S8nc/MlLVK3Y=
github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.3-k3s1/go.mod h1:p8OmVbdzpawdZ/r9E1qcdJpzRirEg4OcSg8aZVWqvJo=
github.com/rancher/kubernetes v1.18.4-k3s1 h1:eTwiRm2Gu8AUDgrda5FK8atZvkh6/3ZDoRFsxmnLvLc=
github.com/rancher/kubernetes v1.18.4-k3s1/go.mod h1:Efg82S+Ti02A/Mww53bxroc7IgzX2bgPsf6hT8gAs3M=
github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.4-k3s1 h1:R6Iye4M4FL/qspOzcJvNPJQNnk9n7nu94FBX+jyqcIA=
github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.4-k3s1/go.mod h1:oMzWB6/RPBLYAObltLVSu5Ms1ZztBe7G8s1ni2rZY7w=
github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.4-k3s1 h1:JYqgiFoq95sytgWi1hteMyKvpCZVi5HCXHMFW6sad7Y=
github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.4-k3s1/go.mod h1:BVIYewlEVCukQBRrZR3Kms8GdCsDQBsRIBCoy3rwzMk=
github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.4-k3s1 h1:UyAGvDwMVjn02+CwO5Z70vCmvDpLvmUem7ReuvoImIw=
github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.4-k3s1/go.mod h1:0LbhSvBf6oDO/G0IsPYTC3eGykX9kRjGqE1+90am7Pg=
github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.4-k3s1 h1:6h8JjWF0cmorfsKyuLdKEOaGd1fpXgu8VUG5Ynu5v2A=
github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.4-k3s1/go.mod h1:wYoVKxMBc/Gtl3o5eEhoIy1iS0Zw8kLYIak9mud65gg=
github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.4-k3s1 h1:BMh4HW6YDId9g4Xja9MXJOXJI+dRyq/mw/0yKRXAIRo=
github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.4-k3s1/go.mod h1:e0a+/gPy7PnNaRJHZz5E3lqfMsiJ17sSfvktHyipb3I=
github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.4-k3s1 h1:Ly7X70k/C2xI4iuC2OZ2ktsRzyuVDVLASsQUVkBMwLM=
github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.4-k3s1/go.mod h1:Ck7kQmlFASfY0SaqYH1NwUrxeuAipkIbnuHi642eQ+I=
github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.4-k3s1 h1:M62wX7hIGwkmRoz6BC4L6d+YXczMxyan29pTfcbDqp8=
github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.4-k3s1/go.mod h1:jW0IWD1v1cNcp/vvXbVuovmZNSieKSZBdM7VmX1lrVI=
github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.4-k3s1 h1:AtFTyVKCuUHN/afuWhMIcf1vGVfZ/Uz2PpQn45aLKpU=
github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.4-k3s1/go.mod h1:oHXhD/NqW/vlYggpTUWbP2x6disww69H1jdsyirbJl8=
github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.4-k3s1 h1:QpQx2AXoU+fASZTpLjGjYT0ChNgc1RKImb8WWsOwK1U=
github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.4-k3s1/go.mod h1:qBtAbyavqI3lGwEvxrQk9wwUTWntOADx38Iizyn31nw=
github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.4-k3s1 h1:aNSdkAueVRL0XfmUrALXF/NAavlPlkm0zDP/FBWJXTs=
github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.4-k3s1/go.mod h1:zRlCznOsLYdwq5DB2b/26X/n/04fhV3U3rMC60t80/Q=
github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.4-k3s1 h1:Q9wIqlITpmv93PApIKG75m8AnPGXrZeWcjqlUYk7Au8=
github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.4-k3s1/go.mod h1:O3AtmT8iqccYwp/fsXdy3h0N9X/yfvRMD2XS48PJrBk=
github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.4-k3s1 h1:EENXY3hRYDPo9fuF9EpccUJTgOE0wT8r3oaitEppi24=
github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.4-k3s1/go.mod h1:/YQL/PqGdoNbC2H+w4tx2zrVdxNb541lW3PA81FdOlE=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.4-k3s1 h1:0h0f2gCJAKw170KoUfPsMi/CgTWHq7tb6G7IUPKun2c=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.4-k3s1/go.mod h1:NcOKzNVVRhmkQmzCcBHfPPcZqgGXouc/o3Eul3saPj8=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.4-k3s1 h1:T1NDBQWf93dCAvYC5aCH0Ik3cPoQC+5MOdhg8b4k2OM=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.4-k3s1/go.mod h1:pABoR/v0r2aJLFC1570FaaRJbXyiHhqdGHe5W8nk0XY=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.4-k3s1 h1:tlsv2LXMIEfSLQ5wvfMZQZvUJFwrhupJu5QFxyklavY=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.4-k3s1/go.mod h1:GLAmLACy/nOND24DRGKyPH21F89pTcevjPRxEtbLJmU=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.4-k3s1 h1:TkkarxQdJPnr0rs/uJsQwyL/Alo79oT8Yiakbtvq8Y4=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.4-k3s1/go.mod h1:UNQ/Ff/Mq9mmCl0MYGl3ciCEIRQr9BT+/DSsoy6/ZMI=
github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.4-k3s1 h1:b0YH7h5M8+k6LYMP6A/laGXpad4mkNmuPJoMAvhomGo=
github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.4-k3s1/go.mod h1:eosbAJR16uuWsgirnmlt31NV+ZwZLQsMNbxiRZYbco8=
github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.4-k3s1 h1:EsUdJepxaZSTOFZK1UrhmHPoHdnpRC8OtQWXne92vcE=
github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.4-k3s1/go.mod h1:Raj75cxSm9NiVBoLk/lB1D4XvpBzTG4WoJ6nIH8Cyew=
github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.4-k3s1 h1:0cbzcnS4yzFz3SyaWokOqSDsECcr/bqxDjcy263Fy54=
github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.4-k3s1/go.mod h1:R6lK1g14jiec20OVuA1ArvsCKs5th4rxGL3eUMdQmyA=
github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.4-k3s1 h1:xHggZMz7a4OpR0dEVz9uy1Iskk3/wzLYoGQcQAdCTtc=
github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.4-k3s1/go.mod h1:xZM9EdJpWjqIWPvLiCP7vYKUEMwIgc0S8nc/MlLVK3Y=
github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.4-k3s1/go.mod h1:p8OmVbdzpawdZ/r9E1qcdJpzRirEg4OcSg8aZVWqvJo=
github.com/rancher/moq v0.0.0-20190404221404-ee5226d43009/go.mod h1:wpITyDPTi/Na5h73XkbuEf2AP9fbgrIGqqxVzFhYD6U=
github.com/rancher/remotedialer v0.2.0 h1:xD7t3K6JYwTdAsxmGtTHQMkEkFgKouQ1foLxVW424Dc=
github.com/rancher/remotedialer v0.2.0/go.mod h1:tkU8ZvrR5lRgaKWaX71nAy6daeqvPFx/lJEnbW7tXSI=


@@ -178,7 +178,7 @@ func ValidateManagedFields(fieldsList []metav1.ManagedFieldsEntry, fldPath *fiel
default:
allErrs = append(allErrs, field.Invalid(fldPath.Child("operation"), fields.Operation, "must be `Apply` or `Update`"))
}
if fields.FieldsType != "FieldsV1" {
if len(fields.FieldsType) > 0 && fields.FieldsType != "FieldsV1" {
allErrs = append(allErrs, field.Invalid(fldPath.Child("fieldsType"), fields.FieldsType, "must be `FieldsV1`"))
}
}


@@ -156,10 +156,7 @@ func createHandler(r rest.NamedCreater, scope *RequestScope, admit admission.Int
if err != nil {
return nil, fmt.Errorf("failed to create new object (Create for %v): %v", scope.Kind, err)
}
obj, err = scope.FieldManager.Update(liveObj, obj, managerOrUserAgent(options.FieldManager, req.UserAgent()))
if err != nil {
return nil, fmt.Errorf("failed to update object (Create for %v) managed fields: %v", scope.Kind, err)
}
obj = scope.FieldManager.UpdateNoErrors(liveObj, obj, managerOrUserAgent(options.FieldManager, req.UserAgent()))
}
if mutatingAdmission, ok := admit.(admission.MutationInterface); ok && mutatingAdmission.Handles(admission.Create) {
if err := mutatingAdmission.Admit(ctx, admissionAttributes, scope); err != nil {


@@ -18,12 +18,15 @@ package fieldmanager
import (
"fmt"
"reflect"
"time"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal"
"k8s.io/klog"
openapiproto "k8s.io/kube-openapi/pkg/util/proto"
"sigs.k8s.io/structured-merge-diff/v3/fieldpath"
)
@@ -37,6 +40,8 @@ const DefaultMaxUpdateManagers int = 10
// starts being tracked from the object's creation, instead of from the first time the object is applied to.
const DefaultTrackOnCreateProbability float32 = 1
var atMostEverySecond = internal.NewAtMostEvery(time.Second)
// Managed groups a fieldpath.ManagedFields together with the timestamps associated with each operation.
type Managed interface {
// Fields gets the fieldpath.ManagedFields.
@@ -107,20 +112,26 @@ func newDefaultFieldManager(f Manager, objectCreater runtime.ObjectCreater, kind
func (f *FieldManager) Update(liveObj, newObj runtime.Object, manager string) (object runtime.Object, err error) {
// If the object doesn't have metadata, we should just return without trying to
// set the managedFields at all, so creates/updates/patches will work normally.
if _, err = meta.Accessor(newObj); err != nil {
newAccessor, err := meta.Accessor(newObj)
if err != nil {
return newObj, nil
}
// First try to decode the managed fields provided in the update,
// This is necessary to allow directly updating managed fields.
var managed Managed
if managed, err = internal.DecodeObjectManagedFields(newObj); err != nil || len(managed.Fields()) == 0 {
if isResetManagedFields(newAccessor.GetManagedFields()) {
managed = internal.NewEmptyManaged()
} else if managed, err = internal.DecodeObjectManagedFields(newAccessor.GetManagedFields()); err != nil || len(managed.Fields()) == 0 {
liveAccessor, err := meta.Accessor(liveObj)
if err != nil {
return newObj, nil
}
// If the managed field is empty or we failed to decode it,
// let's try the live object. This is to prevent clients who
// don't understand managedFields from deleting it accidentally.
managed, err = internal.DecodeObjectManagedFields(liveObj)
if err != nil {
return nil, fmt.Errorf("failed to decode managed fields: %v", err)
if managed, err = internal.DecodeObjectManagedFields(liveAccessor.GetManagedFields()); err != nil {
managed = internal.NewEmptyManaged()
}
}
@@ -138,17 +149,52 @@ func (f *FieldManager) Update(liveObj, newObj runtime.Object, manager string) (o
return object, nil
}
// UpdateNoErrors is the same as Update, but it will not return
// errors. If an error happens, the object is returned with
// managedFields cleared.
func (f *FieldManager) UpdateNoErrors(liveObj, newObj runtime.Object, manager string) runtime.Object {
obj, err := f.Update(liveObj, newObj, manager)
if err != nil {
atMostEverySecond.Do(func() {
klog.Errorf("[SHOULD NOT HAPPEN] failed to update managedFields for %v: %v",
newObj.GetObjectKind().GroupVersionKind(),
err)
})
// Explicitly remove managedFields on failure, so that
// we can't have garbage in it.
internal.RemoveObjectManagedFields(newObj)
return newObj
}
return obj
}
// Returns true if the managedFields indicate that the user is trying to
// reset the managedFields, i.e. if the list is non-nil but empty, or if
// the list has one empty item.
func isResetManagedFields(managedFields []metav1.ManagedFieldsEntry) bool {
if len(managedFields) == 0 {
return managedFields != nil
}
if len(managedFields) == 1 {
return reflect.DeepEqual(managedFields[0], metav1.ManagedFieldsEntry{})
}
return false
}
// Apply is used when server-side apply is called, as it merges the
// object and updates the managed fields.
func (f *FieldManager) Apply(liveObj, appliedObj runtime.Object, manager string, force bool) (object runtime.Object, err error) {
// If the object doesn't have metadata, apply isn't allowed.
if _, err = meta.Accessor(liveObj); err != nil {
accessor, err := meta.Accessor(liveObj)
if err != nil {
return nil, fmt.Errorf("couldn't get accessor: %v", err)
}
// Decode the managed fields in the live object, since it isn't allowed in the patch.
var managed Managed
if managed, err = internal.DecodeObjectManagedFields(liveObj); err != nil {
managed, err := internal.DecodeObjectManagedFields(accessor.GetManagedFields())
if err != nil {
return nil, fmt.Errorf("failed to decode managed fields: %v", err)
}
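
The reset check and the error-swallowing wrapper added above are small enough to illustrate standalone. The sketch below mirrors isResetManagedFields using a simplified stand-in for metav1.ManagedFieldsEntry (the real type has more fields); it is an illustration of the rule, not the apiserver code itself.

```go
package main

import (
	"fmt"
	"reflect"
)

// ManagedFieldsEntry is a reduced stand-in for metav1.ManagedFieldsEntry,
// kept only to show the shape of the reset check (assumed fields).
type ManagedFieldsEntry struct {
	Manager    string
	Operation  string
	FieldsType string
}

// isResetManagedFields mirrors the new helper: a non-nil but empty list, or a
// list containing exactly one zero-valued entry, asks the server to clear
// managedFields; a nil list means the client simply did not send the field.
func isResetManagedFields(managedFields []ManagedFieldsEntry) bool {
	if len(managedFields) == 0 {
		return managedFields != nil
	}
	if len(managedFields) == 1 {
		return reflect.DeepEqual(managedFields[0], ManagedFieldsEntry{})
	}
	return false
}

func main() {
	fmt.Println(isResetManagedFields(nil))                                        // false: field omitted
	fmt.Println(isResetManagedFields([]ManagedFieldsEntry{}))                     // true: explicit empty list
	fmt.Println(isResetManagedFields([]ManagedFieldsEntry{{}}))                   // true: one empty entry
	fmt.Println(isResetManagedFields([]ManagedFieldsEntry{{Manager: "kubectl"}})) // false: real entry
}
```

UpdateNoErrors follows the same spirit at the call sites later in this commit: rather than failing the whole request when managedFields bookkeeping breaks, it logs (rate-limited) and strips the field.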


@@ -53,6 +53,11 @@ func (m *managedStruct) Times() map[string]*metav1.Time {
return m.times
}
// NewEmptyManaged creates an empty ManagedInterface.
func NewEmptyManaged() ManagedInterface {
return NewManaged(fieldpath.ManagedFields{}, map[string]*metav1.Time{})
}
// NewManaged creates a ManagedInterface from a fieldpath.ManagedFields and the timestamps associated with each operation.
func NewManaged(f fieldpath.ManagedFields, t map[string]*metav1.Time) ManagedInterface {
return &managedStruct{
@@ -73,16 +78,8 @@ func RemoveObjectManagedFields(obj runtime.Object) {
}
// DecodeObjectManagedFields extracts and converts the objects ManagedFields into a fieldpath.ManagedFields.
func DecodeObjectManagedFields(from runtime.Object) (ManagedInterface, error) {
if from == nil {
return &managedStruct{}, nil
}
accessor, err := meta.Accessor(from)
if err != nil {
panic(fmt.Sprintf("couldn't get accessor: %v", err))
}
managed, err := decodeManagedFields(accessor.GetManagedFields())
func DecodeObjectManagedFields(from []metav1.ManagedFieldsEntry) (ManagedInterface, error) {
managed, err := decodeManagedFields(from)
if err != nil {
return nil, fmt.Errorf("failed to convert managed fields from API: %v", err)
}
@@ -110,7 +107,16 @@ func EncodeObjectManagedFields(obj runtime.Object, managed ManagedInterface) err
func decodeManagedFields(encodedManagedFields []metav1.ManagedFieldsEntry) (managed managedStruct, err error) {
managed.fields = make(fieldpath.ManagedFields, len(encodedManagedFields))
managed.times = make(map[string]*metav1.Time, len(encodedManagedFields))
for _, encodedVersionedSet := range encodedManagedFields {
for i, encodedVersionedSet := range encodedManagedFields {
switch encodedVersionedSet.FieldsType {
case "FieldsV1":
// Valid case.
case "":
return managedStruct{}, fmt.Errorf("missing fieldsType in managed fields entry %d", i)
default:
return managedStruct{}, fmt.Errorf("invalid fieldsType %q in managed fields entry %d", encodedVersionedSet.FieldsType, i)
}
manager, err := BuildManagerIdentifier(&encodedVersionedSet)
if err != nil {
return managedStruct{}, fmt.Errorf("error decoding manager from %v: %v", encodedVersionedSet, err)
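
The new per-entry switch above is easy to exercise in isolation. A minimal sketch, again with a simplified entry type (an assumption for illustration) rather than the real metav1 struct:

```go
package main

import "fmt"

// entry stands in for metav1.ManagedFieldsEntry; only FieldsType matters here.
type entry struct {
	Manager    string
	FieldsType string
}

// checkFieldsTypes mirrors the added switch in decodeManagedFields: every entry
// must declare FieldsV1, and the error identifies the offending index.
func checkFieldsTypes(entries []entry) error {
	for i, e := range entries {
		switch e.FieldsType {
		case "FieldsV1":
			// valid
		case "":
			return fmt.Errorf("missing fieldsType in managed fields entry %d", i)
		default:
			return fmt.Errorf("invalid fieldsType %q in managed fields entry %d", e.FieldsType, i)
		}
	}
	return nil
}

func main() {
	fmt.Println(checkFieldsTypes([]entry{{Manager: "kubectl", FieldsType: "FieldsV1"}})) // <nil>
	fmt.Println(checkFieldsTypes([]entry{{Manager: "kubectl"}}))                         // missing fieldsType in entry 0
	fmt.Println(checkFieldsTypes([]entry{{FieldsType: "FieldsV2"}}))                     // invalid fieldsType "FieldsV2"
}
```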


@@ -18,14 +18,12 @@ package fieldmanager
import (
"fmt"
"time"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal"
"k8s.io/klog"
openapiproto "k8s.io/kube-openapi/pkg/util/proto"
"sigs.k8s.io/structured-merge-diff/v3/fieldpath"
"sigs.k8s.io/structured-merge-diff/v3/merge"
@@ -41,7 +39,6 @@ type structuredMergeManager struct {
}
var _ Manager = &structuredMergeManager{}
var atMostEverySecond = internal.NewAtMostEvery(time.Second)
// NewStructuredMergeManager creates a new Manager that merges apply requests
// and update managed fields for other types of requests.
@@ -98,19 +95,11 @@ func (f *structuredMergeManager) Update(liveObj, newObj runtime.Object, managed
}
newObjTyped, err := f.typeConverter.ObjectToTyped(newObjVersioned)
if err != nil {
// Return newObj and just by-pass fields update. This really shouldn't happen.
atMostEverySecond.Do(func() {
klog.Errorf("[SHOULD NOT HAPPEN] failed to create typed new object of type %v: %v", newObjVersioned.GetObjectKind().GroupVersionKind(), err)
})
return newObj, managed, nil
return nil, nil, fmt.Errorf("failed to convert new object (%v) to smd typed: %v", newObjVersioned.GetObjectKind().GroupVersionKind(), err)
}
liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned)
if err != nil {
// Return newObj and just by-pass fields update. This really shouldn't happen.
atMostEverySecond.Do(func() {
klog.Errorf("[SHOULD NOT HAPPEN] failed to create typed live object of type %v: %v", liveObjVersioned.GetObjectKind().GroupVersionKind(), err)
})
return newObj, managed, nil
return nil, nil, fmt.Errorf("failed to convert live object (%v) to smd typed: %v", liveObjVersioned.GetObjectKind().GroupVersionKind(), err)
}
apiVersion := fieldpath.APIVersion(f.groupVersion.String())


@@ -323,9 +323,7 @@ func (p *jsonPatcher) applyPatchToCurrentObject(currentObject runtime.Object) (r
}
if p.fieldManager != nil {
if objToUpdate, err = p.fieldManager.Update(currentObject, objToUpdate, managerOrUserAgent(p.options.FieldManager, p.userAgent)); err != nil {
return nil, fmt.Errorf("failed to update object (json PATCH for %v) managed fields: %v", p.kind, err)
}
objToUpdate = p.fieldManager.UpdateNoErrors(currentObject, objToUpdate, managerOrUserAgent(p.options.FieldManager, p.userAgent))
}
return objToUpdate, nil
}
@@ -408,9 +406,7 @@ func (p *smpPatcher) applyPatchToCurrentObject(currentObject runtime.Object) (ru
}
if p.fieldManager != nil {
if newObj, err = p.fieldManager.Update(currentObject, newObj, managerOrUserAgent(p.options.FieldManager, p.userAgent)); err != nil {
return nil, fmt.Errorf("failed to update object (smp PATCH for %v) managed fields: %v", p.kind, err)
}
newObj = p.fieldManager.UpdateNoErrors(currentObject, newObj, managerOrUserAgent(p.options.FieldManager, p.userAgent))
}
return newObj, nil
}


@@ -131,11 +131,7 @@ func UpdateResource(r rest.Updater, scope *RequestScope, admit admission.Interfa
if scope.FieldManager != nil {
transformers = append(transformers, func(_ context.Context, newObj, liveObj runtime.Object) (runtime.Object, error) {
if shouldUpdateManagedFields {
obj, err := scope.FieldManager.Update(liveObj, newObj, managerOrUserAgent(options.FieldManager, req.UserAgent()))
if err != nil {
return nil, fmt.Errorf("failed to update object (Update for %v) managed fields: %v", scope.Kind, err)
}
return obj, nil
return scope.FieldManager.UpdateNoErrors(liveObj, newObj, managerOrUserAgent(options.FieldManager, req.UserAgent())), nil
}
return newObj, nil
})


@@ -3,6 +3,7 @@ package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
@@ -35,3 +36,9 @@ filegroup(
srcs = [":package-srcs"],
tags = ["automanaged"],
)
go_test(
name = "go_default_test",
srcs = ["metrics_test.go"],
embed = [":go_default_library"],
)


@@ -28,7 +28,6 @@ import (
"time"
restful "github.com/emicklei/go-restful"
"k8s.io/apimachinery/pkg/apis/meta/v1/validation"
"k8s.io/apimachinery/pkg/types"
utilsets "k8s.io/apimachinery/pkg/util/sets"
@@ -48,6 +47,8 @@ type resettableCollector interface {
const (
APIServerComponent string = "apiserver"
OtherContentType string = "other"
OtherRequestMethod string = "other"
)
/*
@@ -172,6 +173,37 @@ var (
currentInflightRequests,
requestTerminationsTotal,
}
// these are the known (e.g. whitelisted/known) content types which we will report for
// request metrics. Any other RFC compliant content types will be aggregated under 'unknown'
knownMetricContentTypes = utilsets.NewString(
"application/apply-patch+yaml",
"application/json",
"application/json-patch+json",
"application/merge-patch+json",
"application/strategic-merge-patch+json",
"application/vnd.kubernetes.protobuf",
"application/vnd.kubernetes.protobuf;stream=watch",
"application/yaml",
"text/plain",
"text/plain;charset=utf-8")
// these are the valid request methods which we report in our metrics. Any other request methods
// will be aggregated under 'unknown'
validRequestMethods = utilsets.NewString(
"APPLY",
"CONNECT",
"CREATE",
"DELETE",
"DELETECOLLECTION",
"GET",
"LIST",
"PATCH",
"POST",
"PROXY",
"PUT",
"UPDATE",
"WATCH",
"WATCHLIST")
)
const (
@@ -219,6 +251,10 @@ func RecordRequestTermination(req *http.Request, requestInfo *request.RequestInf
// translated to RequestInfo).
// However, we need to tweak it e.g. to differentiate GET from LIST.
verb := canonicalVerb(strings.ToUpper(req.Method), scope)
// set verbs to a bounded set of known and expected verbs
if !validRequestMethods.Has(verb) {
verb = OtherRequestMethod
}
if requestInfo.IsResourceRequest {
requestTerminationsTotal.WithLabelValues(cleanVerb(verb, req), requestInfo.APIGroup, requestInfo.APIVersion, requestInfo.Resource, requestInfo.Subresource, scope, component, codeToString(code)).Inc()
} else {
@@ -256,7 +292,8 @@ func MonitorRequest(req *http.Request, verb, group, version, resource, subresour
reportedVerb := cleanVerb(verb, req)
dryRun := cleanDryRun(req.URL)
elapsedSeconds := elapsed.Seconds()
requestCounter.WithLabelValues(reportedVerb, dryRun, group, version, resource, subresource, scope, component, contentType, codeToString(httpCode)).Inc()
cleanContentType := cleanContentType(contentType)
requestCounter.WithLabelValues(reportedVerb, dryRun, group, version, resource, subresource, scope, component, cleanContentType, codeToString(httpCode)).Inc()
requestLatencies.WithLabelValues(reportedVerb, dryRun, group, version, resource, subresource, scope, component).Observe(elapsedSeconds)
// We are only interested in response sizes of read requests.
if verb == "GET" || verb == "LIST" {
@@ -311,6 +348,19 @@ func InstrumentHandlerFunc(verb, group, version, resource, subresource, scope, c
}
}
// cleanContentType binds the contentType (for metrics related purposes) to a
// bounded set of known/expected content-types.
func cleanContentType(contentType string) string {
normalizedContentType := strings.ToLower(contentType)
if strings.HasSuffix(contentType, " stream=watch") || strings.HasSuffix(contentType, " charset=utf-8") {
normalizedContentType = strings.ReplaceAll(contentType, " ", "")
}
if knownMetricContentTypes.Has(normalizedContentType) {
return normalizedContentType
}
return OtherContentType
}
// CleanScope returns the scope of the request.
func CleanScope(requestInfo *request.RequestInfo) string {
if requestInfo.Namespace != "" {
@@ -355,7 +405,10 @@ func cleanVerb(verb string, request *http.Request) string {
if verb == "PATCH" && request.Header.Get("Content-Type") == string(types.ApplyPatchType) && utilfeature.DefaultFeatureGate.Enabled(features.ServerSideApply) {
reportedVerb = "APPLY"
}
return reportedVerb
if validRequestMethods.Has(reportedVerb) {
return reportedVerb
}
return OtherRequestMethod
}
func cleanDryRun(u *url.URL) string {
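
The intent of the metrics changes above is to keep Prometheus label cardinality bounded: arbitrary client-supplied content types and request methods are collapsed into an "other" bucket. A standalone sketch of the content-type half, with the allowed set copied from the diff; the map and helper names here are illustrative, not the apiserver's exported API.

```go
package main

import (
	"fmt"
	"strings"
)

// knownContentTypes lists the content types reported as-is; everything else
// becomes "other" so the requestCounter label set stays bounded.
var knownContentTypes = map[string]bool{
	"application/apply-patch+yaml":                     true,
	"application/json":                                 true,
	"application/json-patch+json":                      true,
	"application/merge-patch+json":                     true,
	"application/strategic-merge-patch+json":           true,
	"application/vnd.kubernetes.protobuf":              true,
	"application/vnd.kubernetes.protobuf;stream=watch": true,
	"application/yaml":                                 true,
	"text/plain":                                       true,
	"text/plain;charset=utf-8":                         true,
}

// cleanContentType mirrors the new helper: lowercase, collapse the common
// "type; param" spellings, then fall back to "other" for anything unknown.
func cleanContentType(contentType string) string {
	normalized := strings.ToLower(contentType)
	if strings.HasSuffix(contentType, " stream=watch") || strings.HasSuffix(contentType, " charset=utf-8") {
		normalized = strings.ReplaceAll(contentType, " ", "")
	}
	if knownContentTypes[normalized] {
		return normalized
	}
	return "other"
}

func main() {
	fmt.Println(cleanContentType("application/json"))                // application/json
	fmt.Println(cleanContentType("text/plain; charset=utf-8"))       // text/plain;charset=utf-8
	fmt.Println(cleanContentType("application/x-unbounded-garbage")) // other
}
```

The verb handling in cleanVerb and RecordRequestTermination applies the same idea with the validRequestMethods set shown above.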


@@ -3,8 +3,8 @@ package version
var (
gitMajor = "1"
gitMinor = "18"
gitVersion = "v1.18.3-k3s1"
gitCommit = "e3eaf6c5726840b2e31ea2b0a2ea1c2d40ffed2a"
gitVersion = "v1.18.4-k3s1"
gitCommit = "52f8d69fc5bdd2ceb2ea47fb3b51190bba9e5c06"
gitTreeState = "clean"
buildDate = "2020-05-26T21:45:32Z"
buildDate = "2020-06-17T20:02:22Z"
)


@@ -3,8 +3,8 @@ package version
var (
gitMajor = "1"
gitMinor = "18"
gitVersion = "v1.18.3-k3s1"
gitCommit = "e3eaf6c5726840b2e31ea2b0a2ea1c2d40ffed2a"
gitVersion = "v1.18.4-k3s1"
gitCommit = "52f8d69fc5bdd2ceb2ea47fb3b51190bba9e5c06"
gitTreeState = "clean"
buildDate = "2020-05-26T21:45:32Z"
buildDate = "2020-06-17T20:02:22Z"
)


@@ -222,7 +222,7 @@ func (o *CertificateOptions) modifyCertificateCondition(builder *resource.Builde
WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...).
ContinueOnError().
FilenameParam(false, &o.FilenameOptions).
ResourceNames("certificatesigningrequest", o.csrNames...).
ResourceNames("certificatesigningrequests.v1beta1.certificates.k8s.io", o.csrNames...).
RequireObject(true).
Flatten().
Latest().
@@ -232,7 +232,10 @@ func (o *CertificateOptions) modifyCertificateCondition(builder *resource.Builde
return err
}
for i := 0; ; i++ {
csr := info.Object.(*certificatesv1beta1.CertificateSigningRequest)
csr, ok := info.Object.(*certificatesv1beta1.CertificateSigningRequest)
if !ok {
return fmt.Errorf("can only handle certificates.k8s.io/v1beta1 certificate signing requests")
}
csr, hasCondition := modify(csr)
if !hasCondition || force {
_, err = clientSet.CertificateSigningRequests().UpdateApproval(context.TODO(), csr, metav1.UpdateOptions{})
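
The kubectl change above does two things: it asks the builder for the fully qualified certificatesigningrequests.v1beta1.certificates.k8s.io resource, and it replaces a bare type assertion with a comma-ok check so an unexpected object type produces a readable error instead of a panic. A generic sketch of that second pattern; all types here are illustrative stand-ins, not kubectl's.

```go
package main

import (
	"errors"
	"fmt"
)

// certificateSigningRequest is a toy stand-in for the v1beta1 CSR type.
type certificateSigningRequest struct{ Name string }

// asCSR shows the comma-ok assertion: failure becomes an error, not a panic.
func asCSR(obj interface{}) (*certificateSigningRequest, error) {
	csr, ok := obj.(*certificateSigningRequest)
	if !ok {
		return nil, errors.New("can only handle certificates.k8s.io/v1beta1 certificate signing requests")
	}
	return csr, nil
}

func main() {
	if csr, err := asCSR(&certificateSigningRequest{Name: "node-csr-1"}); err == nil {
		fmt.Println("approving", csr.Name)
	}
	if _, err := asCSR("not a CSR"); err != nil {
		fmt.Println("error:", err)
	}
}
```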


@@ -13,7 +13,6 @@ go_library(
"//cmd/kube-apiserver/app/options:go_default_library",
"//pkg/api/legacyscheme:go_default_library",
"//pkg/capabilities:go_default_library",
"//pkg/controller/serviceaccount:go_default_library",
"//pkg/features:go_default_library",
"//pkg/generated/openapi:go_default_library",
"//pkg/kubeapiserver:go_default_library",
@@ -30,7 +29,6 @@ go_library(
"//pkg/registry/rbac/rest:go_default_library",
"//pkg/serviceaccount:go_default_library",
"//pkg/util/flag:go_default_library",
"//plugin/pkg/auth/authenticator/token/bootstrap:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver:go_default_library",
@@ -44,7 +42,6 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/admission:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/endpoints/openapi:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/features:go_default_library",
@@ -78,7 +75,6 @@ go_library(
"//staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1:go_default_library",
"//staging/src/k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration/v1:go_default_library",
"//staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister:go_default_library",
"//vendor/github.com/go-openapi/spec:go_default_library",
"//vendor/github.com/spf13/cobra:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],


@@ -30,13 +30,11 @@ import (
"strings"
"time"
"github.com/go-openapi/spec"
"k8s.io/kubernetes/pkg/kubelet/types"
"github.com/spf13/cobra"
extensionsapiserver "k8s.io/apiextensions-apiserver/pkg/apiserver"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/sets"
@@ -73,7 +71,6 @@ import (
"k8s.io/kubernetes/cmd/kube-apiserver/app/options"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/capabilities"
serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount"
"k8s.io/kubernetes/pkg/features"
generatedopenapi "k8s.io/kubernetes/pkg/generated/openapi"
"k8s.io/kubernetes/pkg/kubeapiserver"
@@ -89,7 +86,6 @@ import (
rbacrest "k8s.io/kubernetes/pkg/registry/rbac/rest"
"k8s.io/kubernetes/pkg/serviceaccount"
utilflag "k8s.io/kubernetes/pkg/util/flag"
"k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/bootstrap"
)
const (
@@ -466,9 +462,6 @@ func buildGenericConfig(
if lastErr = s.SecureServing.ApplyTo(&genericConfig.SecureServing, &genericConfig.LoopbackClientConfig); lastErr != nil {
return
}
if lastErr = s.Authentication.ApplyTo(genericConfig); lastErr != nil {
return
}
if lastErr = s.Features.ApplyTo(genericConfig); lastErr != nil {
return
}
@@ -524,9 +517,8 @@ func buildGenericConfig(
}
versionedInformers = clientgoinformers.NewSharedInformerFactory(clientgoExternalClient, 10*time.Minute)
genericConfig.Authentication.Authenticator, genericConfig.OpenAPIConfig.SecurityDefinitions, err = BuildAuthenticator(s, genericConfig.EgressSelector, clientgoExternalClient, versionedInformers)
if err != nil {
lastErr = fmt.Errorf("invalid authentication config: %v", err)
// Authentication.ApplyTo requires already applied OpenAPIConfig and EgressSelector if present
if lastErr = s.Authentication.ApplyTo(&genericConfig.Authentication, genericConfig.SecureServing, genericConfig.EgressSelector, genericConfig.OpenAPIConfig, clientgoExternalClient, versionedInformers); lastErr != nil {
return
}
@@ -585,35 +577,6 @@ func buildGenericConfig(
return
}
// BuildAuthenticator constructs the authenticator
func BuildAuthenticator(s *options.ServerRunOptions, EgressSelector *egressselector.EgressSelector, extclient clientgoclientset.Interface, versionedInformer clientgoinformers.SharedInformerFactory) (authenticator.Request, *spec.SecurityDefinitions, error) {
authenticatorConfig, err := s.Authentication.ToAuthenticationConfig()
if err != nil {
return nil, nil, err
}
if s.Authentication.ServiceAccounts.Lookup || utilfeature.DefaultFeatureGate.Enabled(features.TokenRequest) {
authenticatorConfig.ServiceAccountTokenGetter = serviceaccountcontroller.NewGetterFromClient(
extclient,
versionedInformer.Core().V1().Secrets().Lister(),
versionedInformer.Core().V1().ServiceAccounts().Lister(),
versionedInformer.Core().V1().Pods().Lister(),
)
}
authenticatorConfig.BootstrapTokenAuthenticator = bootstrap.NewTokenAuthenticator(
versionedInformer.Core().V1().Secrets().Lister().Secrets(v1.NamespaceSystem),
)
if EgressSelector != nil {
egressDialer, err := EgressSelector.Lookup(egressselector.Master.AsNetworkContext())
if err != nil {
return nil, nil, err
}
authenticatorConfig.CustomDial = egressDialer
}
return authenticatorConfig.New()
}
// BuildAuthorizer constructs the authorizer
func BuildAuthorizer(s *options.ServerRunOptions, EgressSelector *egressselector.EgressSelector, versionedInformers clientgoinformers.SharedInformerFactory) (authorizer.Authorizer, authorizer.RuleResolver, error) {
authorizationConfig := s.Authorization.ToAuthorizationConfig(versionedInformers)


@@ -36,7 +36,8 @@ import (
)
// BuildAuth creates an authenticator, an authorizer, and a matching authorizer attributes getter compatible with the kubelet's needs
func BuildAuth(nodeName types.NodeName, client clientset.Interface, config kubeletconfig.KubeletConfiguration) (server.AuthInterface, error) {
// It returns AuthInterface, a run method to start internal controllers (like cert reloading) and error.
func BuildAuth(nodeName types.NodeName, client clientset.Interface, config kubeletconfig.KubeletConfiguration) (server.AuthInterface, func(<-chan struct{}), error) {
// Get clients, if provided
var (
tokenClient authenticationclient.TokenReviewInterface
@@ -47,47 +48,55 @@ func BuildAuth(nodeName types.NodeName, client clientset.Interface, config kubel
sarClient = client.AuthorizationV1().SubjectAccessReviews()
}
authenticator, err := BuildAuthn(tokenClient, config.Authentication)
authenticator, runAuthenticatorCAReload, err := BuildAuthn(tokenClient, config.Authentication)
if err != nil {
return nil, err
return nil, nil, err
}
attributes := server.NewNodeAuthorizerAttributesGetter(nodeName)
authorizer, err := BuildAuthz(sarClient, config.Authorization)
if err != nil {
return nil, err
return nil, nil, err
}
return server.NewKubeletAuth(authenticator, attributes, authorizer), nil
return server.NewKubeletAuth(authenticator, attributes, authorizer), runAuthenticatorCAReload, nil
}
// BuildAuthn creates an authenticator compatible with the kubelet's needs
func BuildAuthn(client authenticationclient.TokenReviewInterface, authn kubeletconfig.KubeletAuthentication) (authenticator.Request, error) {
var clientCertificateCAContentProvider authenticatorfactory.CAContentProvider
func BuildAuthn(client authenticationclient.TokenReviewInterface, authn kubeletconfig.KubeletAuthentication) (authenticator.Request, func(<-chan struct{}), error) {
var dynamicCAContentFromFile *dynamiccertificates.DynamicFileCAContent
var err error
if len(authn.X509.ClientCAFile) > 0 {
clientCertificateCAContentProvider, err = dynamiccertificates.NewDynamicCAContentFromFile("client-ca-bundle", authn.X509.ClientCAFile)
dynamicCAContentFromFile, err = dynamiccertificates.NewDynamicCAContentFromFile("client-ca-bundle", authn.X509.ClientCAFile)
if err != nil {
return nil, err
return nil, nil, err
}
}
authenticatorConfig := authenticatorfactory.DelegatingAuthenticatorConfig{
Anonymous: authn.Anonymous.Enabled,
CacheTTL: authn.Webhook.CacheTTL.Duration,
ClientCertificateCAContentProvider: clientCertificateCAContentProvider,
ClientCertificateCAContentProvider: dynamicCAContentFromFile,
}
if authn.Webhook.Enabled {
if client == nil {
return nil, errors.New("no client provided, cannot use webhook authentication")
return nil, nil, errors.New("no client provided, cannot use webhook authentication")
}
authenticatorConfig.TokenAccessReviewClient = client
}
authenticator, _, err := authenticatorConfig.New()
return authenticator, err
if err != nil {
return nil, nil, err
}
return authenticator, func(stopCh <-chan struct{}) {
if dynamicCAContentFromFile != nil {
go dynamicCAContentFromFile.Run(1, stopCh)
}
}, err
}
// BuildAuthz creates an authorizer compatible with the kubelet's needs
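
The signature change above is the notable part: BuildAuth and BuildAuthn now also return a run function, so the caller (see the kubelet run() hunk below) decides when to start the client-CA reload loop. A small sketch of that constructor-plus-run-function pattern; the file path, timing, and names are made up for illustration and do not come from the kubelet.

```go
package main

import (
	"fmt"
	"time"
)

type authenticator struct{ caFile string }

type runFunc func(stopCh <-chan struct{})

// buildAuthn returns the built authenticator plus a function that starts
// background CA reloading only when the caller invokes it.
func buildAuthn(caFile string) (*authenticator, runFunc, error) {
	a := &authenticator{caFile: caFile}
	run := func(stopCh <-chan struct{}) {
		if caFile == "" {
			return // nothing to watch
		}
		go func() {
			// Stand-in for dynamicCAContentFromFile.Run(1, stopCh).
			for {
				select {
				case <-stopCh:
					return
				case <-time.After(10 * time.Second):
					fmt.Println("would re-read", caFile)
				}
			}
		}()
	}
	return a, run, nil
}

func main() {
	stopCh := make(chan struct{})
	auth, runCAReload, err := buildAuthn("/path/to/client-ca.crt") // hypothetical path
	if err != nil {
		panic(err)
	}
	runCAReload(stopCh) // start reloading only after wiring succeeded
	fmt.Println("authenticator ready:", auth.caFile)
	close(stopCh)
}
```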


@@ -596,11 +596,12 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, featureGate f
}
if kubeDeps.Auth == nil {
auth, err := BuildAuth(nodeName, kubeDeps.KubeClient, s.KubeletConfiguration)
auth, runAuthenticatorCAReload, err := BuildAuth(nodeName, kubeDeps.KubeClient, s.KubeletConfiguration)
if err != nil {
return err
}
kubeDeps.Auth = auth
runAuthenticatorCAReload(stopCh)
}
var cgroupRoots []string


@@ -14,6 +14,7 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/kubeapiserver/options",
visibility = ["//visibility:public"],
deps = [
"//pkg/controller/serviceaccount:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubeapiserver/authenticator:go_default_library",
"//pkg/kubeapiserver/authorizer:go_default_library",
@@ -50,6 +51,8 @@ go_library(
"//plugin/pkg/admission/storage/persistentvolume/resize:go_default_library",
"//plugin/pkg/admission/storage/storageclass/setdefault:go_default_library",
"//plugin/pkg/admission/storage/storageobjectinuseprotection:go_default_library",
"//plugin/pkg/auth/authenticator/token/bootstrap:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/admission:go_default_library",
@@ -58,14 +61,17 @@ go_library(
"//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/validating:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server/egressselector:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server/options:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/component-base/cli/flag:go_default_library",
"//staging/src/k8s.io/component-base/featuregate:go_default_library",
"//vendor/github.com/spf13/pflag:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/k8s.io/kube-openapi/pkg/common:go_default_library",
],
)


@@ -24,17 +24,24 @@ import (
"time"
"github.com/spf13/pflag"
"k8s.io/klog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apiserver/pkg/authentication/authenticator"
genericapiserver "k8s.io/apiserver/pkg/server"
"k8s.io/apiserver/pkg/server/egressselector"
genericoptions "k8s.io/apiserver/pkg/server/options"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/klog"
openapicommon "k8s.io/kube-openapi/pkg/common"
serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount"
"k8s.io/kubernetes/pkg/features"
kubeauthenticator "k8s.io/kubernetes/pkg/kubeapiserver/authenticator"
authzmodes "k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes"
"k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/bootstrap"
)
type BuiltInAuthenticationOptions struct {
@@ -421,37 +428,62 @@ func (s *BuiltInAuthenticationOptions) ToAuthenticationConfig() (kubeauthenticat
return ret, nil
}
func (o *BuiltInAuthenticationOptions) ApplyTo(c *genericapiserver.Config) error {
// ApplyTo requires already applied OpenAPIConfig and EgressSelector if present.
func (o *BuiltInAuthenticationOptions) ApplyTo(authInfo *genericapiserver.AuthenticationInfo, secureServing *genericapiserver.SecureServingInfo, egressSelector *egressselector.EgressSelector, openAPIConfig *openapicommon.Config, extclient kubernetes.Interface, versionedInformer informers.SharedInformerFactory) error {
if o == nil {
return nil
}
if o.ClientCert != nil {
clientCertificateCAContentProvider, err := o.ClientCert.GetClientCAContentProvider()
if err != nil {
return fmt.Errorf("unable to load client CA file: %v", err)
}
if err = c.Authentication.ApplyClientCert(clientCertificateCAContentProvider, c.SecureServing); err != nil {
if openAPIConfig == nil {
return errors.New("uninitialized OpenAPIConfig")
}
authenticatorConfig, err := o.ToAuthenticationConfig()
if err != nil {
return err
}
if authenticatorConfig.ClientCAContentProvider != nil {
if err = authInfo.ApplyClientCert(authenticatorConfig.ClientCAContentProvider, secureServing); err != nil {
return fmt.Errorf("unable to load client CA file: %v", err)
}
}
if o.RequestHeader != nil {
requestHeaderConfig, err := o.RequestHeader.ToAuthenticationRequestHeaderConfig()
if err != nil {
return fmt.Errorf("unable to create request header authentication config: %v", err)
}
if requestHeaderConfig != nil {
if err = c.Authentication.ApplyClientCert(requestHeaderConfig.CAContentProvider, c.SecureServing); err != nil {
return fmt.Errorf("unable to load client CA file: %v", err)
}
if authenticatorConfig.RequestHeaderConfig != nil && authenticatorConfig.RequestHeaderConfig.CAContentProvider != nil {
if err = authInfo.ApplyClientCert(authenticatorConfig.RequestHeaderConfig.CAContentProvider, secureServing); err != nil {
return fmt.Errorf("unable to load client CA file: %v", err)
}
}
c.Authentication.SupportsBasicAuth = o.PasswordFile != nil && len(o.PasswordFile.BasicAuthFile) > 0
authInfo.SupportsBasicAuth = o.PasswordFile != nil && len(o.PasswordFile.BasicAuthFile) > 0
c.Authentication.APIAudiences = o.APIAudiences
authInfo.APIAudiences = o.APIAudiences
if o.ServiceAccounts != nil && o.ServiceAccounts.Issuer != "" && len(o.APIAudiences) == 0 {
c.Authentication.APIAudiences = authenticator.Audiences{o.ServiceAccounts.Issuer}
authInfo.APIAudiences = authenticator.Audiences{o.ServiceAccounts.Issuer}
}
if o.ServiceAccounts.Lookup || utilfeature.DefaultFeatureGate.Enabled(features.TokenRequest) {
authenticatorConfig.ServiceAccountTokenGetter = serviceaccountcontroller.NewGetterFromClient(
extclient,
versionedInformer.Core().V1().Secrets().Lister(),
versionedInformer.Core().V1().ServiceAccounts().Lister(),
versionedInformer.Core().V1().Pods().Lister(),
)
}
authenticatorConfig.BootstrapTokenAuthenticator = bootstrap.NewTokenAuthenticator(
versionedInformer.Core().V1().Secrets().Lister().Secrets(metav1.NamespaceSystem),
)
if egressSelector != nil {
egressDialer, err := egressSelector.Lookup(egressselector.Master.AsNetworkContext())
if err != nil {
return err
}
authenticatorConfig.CustomDial = egressDialer
}
authInfo.Authenticator, openAPIConfig.SecurityDefinitions, err = authenticatorConfig.New()
if err != nil {
return err
}
return nil


@@ -212,6 +212,9 @@ func (m *manager) Start(activePods ActivePodsFunc, sourcesReady config.SourcesRe
}
func (m *manager) Allocate(p *v1.Pod, c *v1.Container) error {
// Garbage collect any stranded resources before allocating CPUs.
m.removeStaleState()
m.Lock()
defer m.Unlock()
@@ -384,18 +387,14 @@ func (m *manager) reconcileState() (success []reconciledContainer, failure []rec
}
if cstatus.State.Terminated != nil {
// Since the container is terminated, we know it is safe to
// remove it without any reconciliation. Removing the container
// will also remove it from the `containerMap` so that this
// container will be skipped next time around the loop.
// The container is terminated but we can't call m.RemoveContainer()
// here because it could remove the allocated cpuset for the container
// which may be in the process of being restarted. That would result
// in the container losing any exclusively-allocated CPUs that it
// was allocated.
_, _, err := m.containerMap.GetContainerRef(containerID)
if err == nil {
klog.Warningf("[cpumanager] reconcileState: skipping container; already terminated (pod: %s, container id: %s)", pod.Name, containerID)
err := m.RemoveContainer(containerID)
if err != nil {
klog.Errorf("[cpumanager] reconcileState: failed to remove container (pod: %s, container id: %s, error: %v)", pod.Name, containerID, err)
failure = append(failure, reconciledContainer{pod.Name, container.Name, containerID})
}
klog.Warningf("[cpumanager] reconcileState: ignoring terminated container (pod: %s, container id: %s)", pod.Name, containerID)
}
continue
}


@@ -77,6 +77,8 @@ type staticPolicy struct {
reserved cpuset.CPUSet
// topology manager reference to get container Topology affinity
affinity topologymanager.Store
// set of CPUs to reuse across allocations in a pod
cpusToReuse map[string]cpuset.CPUSet
}
// Ensure staticPolicy implements Policy interface
@@ -107,9 +109,10 @@ func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reserv
klog.Infof("[cpumanager] reserved %d CPUs (\"%s\") not available for exclusive assignment", reserved.Size(), reserved)
return &staticPolicy{
topology: topology,
reserved: reserved,
affinity: affinity,
topology: topology,
reserved: reserved,
affinity: affinity,
cpusToReuse: make(map[string]cpuset.CPUSet),
}, nil
}
@@ -188,12 +191,37 @@ func (p *staticPolicy) assignableCPUs(s state.State) cpuset.CPUSet {
return s.GetDefaultCPUSet().Difference(p.reserved)
}
func (p *staticPolicy) updateCPUsToReuse(pod *v1.Pod, container *v1.Container, cset cpuset.CPUSet) {
// If pod entries to m.cpusToReuse other than the current pod exist, delete them.
for podUID := range p.cpusToReuse {
if podUID != string(pod.UID) {
delete(p.cpusToReuse, podUID)
}
}
// If no cpuset exists for cpusToReuse by this pod yet, create one.
if _, ok := p.cpusToReuse[string(pod.UID)]; !ok {
p.cpusToReuse[string(pod.UID)] = cpuset.NewCPUSet()
}
// Check if the container is an init container.
// If so, add its cpuset to the cpuset of reusable CPUs for any new allocations.
for _, initContainer := range pod.Spec.InitContainers {
if container.Name == initContainer.Name {
p.cpusToReuse[string(pod.UID)] = p.cpusToReuse[string(pod.UID)].Union(cset)
return
}
}
// Otherwise it is an app container.
// Remove its cpuset from the cpuset of reusable CPUs for any new allocations.
p.cpusToReuse[string(pod.UID)] = p.cpusToReuse[string(pod.UID)].Difference(cset)
}
func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Container) error {
if numCPUs := p.guaranteedCPUs(pod, container); numCPUs != 0 {
klog.Infof("[cpumanager] static policy: Allocate (pod: %s, container: %s)", pod.Name, container.Name)
// container belongs in an exclusively allocated pool
if _, ok := s.GetCPUSet(string(pod.UID), container.Name); ok {
if cpuset, ok := s.GetCPUSet(string(pod.UID), container.Name); ok {
p.updateCPUsToReuse(pod, container, cpuset)
klog.Infof("[cpumanager] static policy: container already present in state, skipping (pod: %s, container: %s)", pod.Name, container.Name)
return nil
}
@@ -203,23 +231,14 @@ func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Contai
klog.Infof("[cpumanager] Pod %v, Container %v Topology Affinity is: %v", pod.UID, container.Name, hint)
// Allocate CPUs according to the NUMA affinity contained in the hint.
cpuset, err := p.allocateCPUs(s, numCPUs, hint.NUMANodeAffinity)
cpuset, err := p.allocateCPUs(s, numCPUs, hint.NUMANodeAffinity, p.cpusToReuse[string(pod.UID)])
if err != nil {
klog.Errorf("[cpumanager] unable to allocate %d CPUs (pod: %s, container: %s, error: %v)", numCPUs, pod.Name, container.Name, err)
return err
}
s.SetCPUSet(string(pod.UID), container.Name, cpuset)
p.updateCPUsToReuse(pod, container, cpuset)
// Check if the container that has just been allocated resources is an init container.
// If so, release its CPUs back into the shared pool so they can be reallocated.
for _, initContainer := range pod.Spec.InitContainers {
if container.Name == initContainer.Name {
if toRelease, ok := s.GetCPUSet(string(pod.UID), container.Name); ok {
// Mutate the shared pool, adding released cpus.
s.SetDefaultCPUSet(s.GetDefaultCPUSet().Union(toRelease))
}
}
}
}
// container belongs in the shared pool (nothing to do; use default cpuset)
return nil
@@ -235,15 +254,17 @@ func (p *staticPolicy) RemoveContainer(s state.State, podUID string, containerNa
return nil
}
func (p *staticPolicy) allocateCPUs(s state.State, numCPUs int, numaAffinity bitmask.BitMask) (cpuset.CPUSet, error) {
func (p *staticPolicy) allocateCPUs(s state.State, numCPUs int, numaAffinity bitmask.BitMask, reusableCPUs cpuset.CPUSet) (cpuset.CPUSet, error) {
klog.Infof("[cpumanager] allocateCpus: (numCPUs: %d, socket: %v)", numCPUs, numaAffinity)
assignableCPUs := p.assignableCPUs(s).Union(reusableCPUs)
// If there are aligned CPUs in numaAffinity, attempt to take those first.
result := cpuset.NewCPUSet()
if numaAffinity != nil {
alignedCPUs := cpuset.NewCPUSet()
for _, numaNodeID := range numaAffinity.GetBits() {
alignedCPUs = alignedCPUs.Union(p.assignableCPUs(s).Intersection(p.topology.CPUDetails.CPUsInNUMANodes(numaNodeID)))
alignedCPUs = alignedCPUs.Union(assignableCPUs.Intersection(p.topology.CPUDetails.CPUsInNUMANodes(numaNodeID)))
}
numAlignedToAlloc := alignedCPUs.Size()
@@ -260,7 +281,7 @@ func (p *staticPolicy) allocateCPUs(s state.State, numCPUs int, numaAffinity bit
}
// Get any remaining CPUs from what's leftover after attempting to grab aligned ones.
remainingCPUs, err := takeByTopology(p.topology, p.assignableCPUs(s).Difference(result), numCPUs-result.Size())
remainingCPUs, err := takeByTopology(p.topology, assignableCPUs.Difference(result), numCPUs-result.Size())
if err != nil {
return cpuset.NewCPUSet(), err
}
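
The behavioural change in this file: instead of returning an init container's exclusive CPUs straight to the shared pool, the policy now remembers them per pod and offers them back to that pod's later containers. A reduced sketch of the bookkeeping, with a toy cpuSet type standing in for cpuset.CPUSet (an assumption for illustration):

```go
package main

import "fmt"

type cpuSet map[int]bool

func union(a, b cpuSet) cpuSet {
	out := cpuSet{}
	for c := range a {
		out[c] = true
	}
	for c := range b {
		out[c] = true
	}
	return out
}

func difference(a, b cpuSet) cpuSet {
	out := cpuSet{}
	for c := range a {
		if !b[c] {
			out[c] = true
		}
	}
	return out
}

type policy struct{ cpusToReuse map[string]cpuSet }

// updateCPUsToReuse mirrors the new helper: track only the pod currently being
// allocated, add init-container CPUs to the reusable set, and remove CPUs taken
// by app containers.
func (p *policy) updateCPUsToReuse(podUID string, isInit bool, allocated cpuSet) {
	for uid := range p.cpusToReuse {
		if uid != podUID {
			delete(p.cpusToReuse, uid)
		}
	}
	if _, ok := p.cpusToReuse[podUID]; !ok {
		p.cpusToReuse[podUID] = cpuSet{}
	}
	if isInit {
		p.cpusToReuse[podUID] = union(p.cpusToReuse[podUID], allocated)
		return
	}
	p.cpusToReuse[podUID] = difference(p.cpusToReuse[podUID], allocated)
}

func main() {
	p := &policy{cpusToReuse: map[string]cpuSet{}}
	p.updateCPUsToReuse("pod-a", true, cpuSet{2: true, 3: true}) // init container used CPUs 2,3
	p.updateCPUsToReuse("pod-a", false, cpuSet{2: true})         // app container keeps CPU 2
	fmt.Println(p.cpusToReuse["pod-a"])                          // map[3:true]: CPU 3 remains reusable
}
```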


@@ -54,19 +54,10 @@ func (i *internalContainerLifecycleImpl) PreStartContainer(pod *v1.Pod, containe
}
func (i *internalContainerLifecycleImpl) PreStopContainer(containerID string) error {
if i.cpuManager != nil {
return i.cpuManager.RemoveContainer(containerID)
}
return nil
}
func (i *internalContainerLifecycleImpl) PostStopContainer(containerID string) error {
if i.cpuManager != nil {
err := i.cpuManager.RemoveContainer(containerID)
if err != nil {
return err
}
}
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.TopologyManager) {
err := i.topologyManager.RemoveContainer(containerID)
if err != nil {


@ -77,6 +77,22 @@ func (kl *Kubelet) syncNetworkUtil() {
klog.Errorf("Failed to ensure rule to drop packet marked by %v in %v chain %v: %v", KubeMarkDropChain, utiliptables.TableFilter, KubeFirewallChain, err)
return
}
// drop all non-local packets to localhost if they're not part of an existing
// forwarded connection. See #90259
if !kl.iptClient.IsIpv6() { // ipv6 doesn't have this issue
if _, err := kl.iptClient.EnsureRule(utiliptables.Append, utiliptables.TableFilter, KubeFirewallChain,
"-m", "comment", "--comment", "block incoming localnet connections",
"--dst", "127.0.0.0/8",
"!", "--src", "127.0.0.0/8",
"-m", "conntrack",
"!", "--ctstate", "RELATED,ESTABLISHED,DNAT",
"-j", "DROP"); err != nil {
klog.Errorf("Failed to ensure rule to drop invalid localhost packets in %v chain %v: %v", utiliptables.TableFilter, KubeFirewallChain, err)
return
}
}
if _, err := kl.iptClient.EnsureRule(utiliptables.Prepend, utiliptables.TableFilter, utiliptables.ChainOutput, "-j", string(KubeFirewallChain)); err != nil {
klog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", utiliptables.TableFilter, utiliptables.ChainOutput, KubeFirewallChain, err)
return
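
Assuming KubeFirewallChain renders as KUBE-FIREWALL, the rule ensured above is roughly the iptables equivalent of:

    iptables -t filter -A KUBE-FIREWALL -m comment --comment "block incoming localnet connections" --dst 127.0.0.0/8 ! --src 127.0.0.0/8 -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP

i.e. packets addressed to 127.0.0.0/8 that did not originate from localhost are dropped unless they belong to an established, related, or DNATed connection. The IPv6 client is skipped because the localnet problem referenced in #90259 only affects IPv4.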


@ -57,5 +57,7 @@ go_test(
"//pkg/apis/core/helper:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/github.com/google/gofuzz:go_default_library",
],
)


@ -44,6 +44,7 @@ func IsOnlyMutatingGCFields(obj, old runtime.Object, equalities conversion.Equal
copiedMeta.SetOwnerReferences(oldMeta.GetOwnerReferences())
copiedMeta.SetFinalizers(oldMeta.GetFinalizers())
copiedMeta.SetSelfLink(oldMeta.GetSelfLink())
copiedMeta.SetManagedFields(oldMeta.GetManagedFields())
return equalities.DeepEqual(copied, old)
}


@ -21,7 +21,6 @@ import (
"reflect"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/scheduler/profile"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
@ -32,6 +31,7 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler/internal/queue"
"k8s.io/kubernetes/pkg/scheduler/profile"
)
func (sched *Scheduler) onPvAdd(obj interface{}) {
@ -301,8 +301,8 @@ func responsibleForPod(pod *v1.Pod, profiles profile.Map) bool {
// skipPodUpdate checks whether the specified pod update should be ignored.
// This function will return true if
// - The pod has already been assumed, AND
// - The pod has only its ResourceVersion, Spec.NodeName and/or Annotations
// updated.
// - The pod has only its ResourceVersion, Spec.NodeName, Annotations,
// ManagedFields, Finalizers and/or Conditions updated.
func (sched *Scheduler) skipPodUpdate(pod *v1.Pod) bool {
// Non-assumed pods should never be skipped.
isAssumed, err := sched.SchedulerCache.IsAssumedPod(pod)
@ -338,6 +338,10 @@ func (sched *Scheduler) skipPodUpdate(pod *v1.Pod) bool {
// Same as above, when annotations are modified with ServerSideApply,
// ManagedFields may also change and must be excluded
p.ManagedFields = nil
// The following might be changed by external controllers, but they don't
// affect scheduling decisions.
p.Finalizers = nil
p.Status.Conditions = nil
return p
}
assumedPodCopy, podCopy := f(assumedPod), f(pod)
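
With these additions, the normalization closure strips every field that may legitimately change on an assumed pod without affecting where it should run; a condensed sketch of the resulting check (illustrative, using reflect.DeepEqual in place of the file's equality helper):

    f := func(pod *v1.Pod) *v1.Pod {
        p := pod.DeepCopy()
        p.ResourceVersion = ""    // bumped on every update
        p.Spec.NodeName = ""      // set once the pod is bound
        p.Annotations = nil       // may be patched by controllers
        p.ManagedFields = nil     // changes with server-side apply
        p.Finalizers = nil        // newly excluded by this hunk
        p.Status.Conditions = nil // newly excluded by this hunk
        return p
    }
    skip := reflect.DeepEqual(f(assumedPod), f(pod)) // true => ignore the update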


@ -25,6 +25,7 @@ import (
"io"
"net"
"path"
"regexp"
"sort"
"strconv"
"strings"
@ -1202,7 +1203,13 @@ func azToRegion(az string) (string, error) {
if len(az) < 1 {
return "", fmt.Errorf("invalid (empty) AZ")
}
region := az[:len(az)-1]
r := regexp.MustCompile(`^([a-zA-Z]+-)+\d+`)
region := r.FindString(az)
if region == "" {
return "", fmt.Errorf("invalid AZ: %s", az)
}
return region, nil
}
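
A few worked examples of the new parsing (the deleted line simply dropped the last character of the AZ, which mis-parses the longer Local Zone / Wavelength style names):

    r := regexp.MustCompile(`^([a-zA-Z]+-)+\d+`)
    r.FindString("us-east-1a")       // "us-east-1"  (old code also returned "us-east-1")
    r.FindString("us-west-2-lax-1a") // "us-west-2"  (old code returned "us-west-2-lax-1")
    r.FindString("useast1a")         // ""           -> azToRegion now returns an error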


@ -74,7 +74,7 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N
}
if az.UseInstanceMetadata {
metadata, err := az.metadata.GetMetadata(azcache.CacheReadTypeUnsafe)
metadata, err := az.metadata.GetMetadata(azcache.CacheReadTypeDefault)
if err != nil {
return nil, err
}
@ -264,7 +264,7 @@ func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, e
}
if az.UseInstanceMetadata {
metadata, err := az.metadata.GetMetadata(azcache.CacheReadTypeUnsafe)
metadata, err := az.metadata.GetMetadata(azcache.CacheReadTypeDefault)
if err != nil {
return "", err
}
@ -351,7 +351,7 @@ func (az *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string,
}
if az.UseInstanceMetadata {
metadata, err := az.metadata.GetMetadata(azcache.CacheReadTypeUnsafe)
metadata, err := az.metadata.GetMetadata(azcache.CacheReadTypeDefault)
if err != nil {
return "", err
}


@ -1082,15 +1082,9 @@ func (az *Cloud) reconcileLoadBalancerRule(
BackendPort: to.Int32Ptr(port.Port),
DisableOutboundSnat: to.BoolPtr(az.disableLoadBalancerOutboundSNAT()),
EnableTCPReset: enableTCPReset,
EnableFloatingIP: to.BoolPtr(true),
},
}
// LB does not support floating IPs for IPV6 rules
if utilnet.IsIPv6String(service.Spec.ClusterIP) {
expectedRule.BackendPort = to.Int32Ptr(port.NodePort)
expectedRule.EnableFloatingIP = to.BoolPtr(false)
} else {
expectedRule.EnableFloatingIP = to.BoolPtr(true)
}
if protocol == v1.ProtocolTCP {
expectedRule.LoadBalancingRulePropertiesFormat.IdleTimeoutInMinutes = lbIdleTimeout


@ -82,10 +82,12 @@ func (c *ManagedDiskController) CreateManagedDisk(options *ManagedDiskOptions) (
var err error
klog.V(4).Infof("azureDisk - creating new managed Name:%s StorageAccountType:%s Size:%v", options.DiskName, options.StorageAccountType, options.SizeGB)
var createZones *[]string
var createZones []string
if len(options.AvailabilityZone) > 0 {
zoneList := []string{c.common.cloud.GetZoneID(options.AvailabilityZone)}
createZones = &zoneList
requestedZone := c.common.cloud.GetZoneID(options.AvailabilityZone)
if requestedZone != "" {
createZones = append(createZones, requestedZone)
}
}
// insert original tags to newTags
@ -155,13 +157,16 @@ func (c *ManagedDiskController) CreateManagedDisk(options *ManagedDiskOptions) (
model := compute.Disk{
Location: &c.common.location,
Tags: newTags,
Zones: createZones,
Sku: &compute.DiskSku{
Name: diskSku,
},
DiskProperties: &diskProperties,
}
if len(createZones) > 0 {
model.Zones = &createZones
}
if options.ResourceGroup == "" {
options.ResourceGroup = c.common.resourceGroup
}


@ -111,6 +111,11 @@ func (g *Cloud) GetLoadBalancer(ctx context.Context, clusterName string, svc *v1
return status, true, nil
}
// Checking for finalizer is more accurate because controller restart could happen in the middle of resource
// deletion. So even though forwarding rule was deleted, cleanup might not have been complete.
if hasFinalizer(svc, ILBFinalizerV1) {
return &v1.LoadBalancerStatus{}, true, nil
}
return nil, false, ignoreNotFound(err)
}
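
The probe above needs only the locally cached Service object; a plausible shape for the helper it calls (assumed here for illustration, the vendored helper may differ):

    func hasFinalizer(svc *v1.Service, key string) bool {
        for _, f := range svc.ObjectMeta.Finalizers {
            if f == key {
                return true
            }
        }
        return false
    }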


@ -63,6 +63,9 @@ func (g *Cloud) ensureInternalLoadBalancer(clusterName, clusterID string, svc *v
"Skipped ensureInternalLoadBalancer as service contains '%s' finalizer.", ILBFinalizerV2)
return nil, cloudprovider.ImplementedElsewhere
}
loadBalancerName := g.GetLoadBalancerName(context.TODO(), clusterName, svc)
klog.V(2).Infof("ensureInternalLoadBalancer(%v): Attaching %q finalizer", loadBalancerName, ILBFinalizerV1)
if err := addFinalizer(svc, g.client.CoreV1(), ILBFinalizerV1); err != nil {
klog.Errorf("Failed to attach finalizer '%s' on service %s/%s - %v", ILBFinalizerV1, svc.Namespace, svc.Name, err)
return nil, err
@ -86,7 +89,6 @@ func (g *Cloud) ensureInternalLoadBalancer(clusterName, clusterID string, svc *v
}
}
loadBalancerName := g.GetLoadBalancerName(context.TODO(), clusterName, svc)
sharedBackend := shareBackendService(svc)
backendServiceName := makeBackendServiceName(loadBalancerName, clusterID, sharedBackend, scheme, protocol, svc.Spec.SessionAffinity)
backendServiceLink := g.getBackendServiceLink(backendServiceName)
@ -314,10 +316,12 @@ func (g *Cloud) ensureInternalLoadBalancerDeleted(clusterName, clusterID string,
// Try deleting instance groups - expect ResourceInuse error if needed by other LBs
igName := makeInstanceGroupName(clusterID)
klog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): Attempting delete of instanceGroup %v", loadBalancerName, igName)
if err := g.ensureInternalInstanceGroupsDeleted(igName); err != nil && !isInUsedByError(err) {
return err
}
klog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): Removing %q finalizer", loadBalancerName, ILBFinalizerV1)
if err := removeFinalizer(svc, g.client.CoreV1(), ILBFinalizerV1); err != nil {
klog.Errorf("Failed to remove finalizer '%s' on service %s/%s - %v", ILBFinalizerV1, svc.Namespace, svc.Name, err)
return err

vendor/modules.txt vendored

@ -1207,7 +1207,7 @@ gopkg.in/square/go-jose.v2/jwt
gopkg.in/warnings.v0
# gopkg.in/yaml.v2 v2.2.8
gopkg.in/yaml.v2
# k8s.io/api v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.3-k3s1
# k8s.io/api v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.4-k3s1
k8s.io/api/admission/v1
k8s.io/api/admission/v1beta1
k8s.io/api/admissionregistration/v1
@ -1251,7 +1251,7 @@ k8s.io/api/settings/v1alpha1
k8s.io/api/storage/v1
k8s.io/api/storage/v1alpha1
k8s.io/api/storage/v1beta1
# k8s.io/apiextensions-apiserver v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.3-k3s1
# k8s.io/apiextensions-apiserver v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.4-k3s1
k8s.io/apiextensions-apiserver/pkg/apihelpers
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install
@ -1291,7 +1291,7 @@ k8s.io/apiextensions-apiserver/pkg/generated/openapi
k8s.io/apiextensions-apiserver/pkg/registry/customresource
k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor
k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition
# k8s.io/apimachinery v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.3-k3s1
# k8s.io/apimachinery v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.4-k3s1
k8s.io/apimachinery/pkg/api/equality
k8s.io/apimachinery/pkg/api/errors
k8s.io/apimachinery/pkg/api/meta
@ -1353,7 +1353,7 @@ k8s.io/apimachinery/pkg/watch
k8s.io/apimachinery/third_party/forked/golang/json
k8s.io/apimachinery/third_party/forked/golang/netutil
k8s.io/apimachinery/third_party/forked/golang/reflect
# k8s.io/apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.3-k3s1
# k8s.io/apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.4-k3s1
k8s.io/apiserver/pkg/admission
k8s.io/apiserver/pkg/admission/configuration
k8s.io/apiserver/pkg/admission/initializer
@ -1483,7 +1483,7 @@ k8s.io/apiserver/plugin/pkg/authenticator/request/basicauth
k8s.io/apiserver/plugin/pkg/authenticator/token/oidc
k8s.io/apiserver/plugin/pkg/authenticator/token/webhook
k8s.io/apiserver/plugin/pkg/authorizer/webhook
# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.3-k3s1
# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.4-k3s1
k8s.io/cli-runtime/pkg/genericclioptions
k8s.io/cli-runtime/pkg/kustomize
k8s.io/cli-runtime/pkg/kustomize/k8sdeps
@ -1496,7 +1496,7 @@ k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch
k8s.io/cli-runtime/pkg/kustomize/k8sdeps/validator
k8s.io/cli-runtime/pkg/printers
k8s.io/cli-runtime/pkg/resource
# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.3-k3s1
# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.4-k3s1
k8s.io/client-go/discovery
k8s.io/client-go/discovery/cached
k8s.io/client-go/discovery/cached/disk
@ -1730,7 +1730,7 @@ k8s.io/client-go/util/jsonpath
k8s.io/client-go/util/keyutil
k8s.io/client-go/util/retry
k8s.io/client-go/util/workqueue
# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.3-k3s1
# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.4-k3s1
k8s.io/cloud-provider
k8s.io/cloud-provider/api
k8s.io/cloud-provider/node/helpers
@ -1738,13 +1738,13 @@ k8s.io/cloud-provider/service/helpers
k8s.io/cloud-provider/volume
k8s.io/cloud-provider/volume/errors
k8s.io/cloud-provider/volume/helpers
# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.3-k3s1
# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.4-k3s1
k8s.io/cluster-bootstrap/token/api
k8s.io/cluster-bootstrap/token/jws
k8s.io/cluster-bootstrap/token/util
k8s.io/cluster-bootstrap/util/secrets
k8s.io/cluster-bootstrap/util/tokens
# k8s.io/code-generator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.3-k3s1
# k8s.io/code-generator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.4-k3s1
k8s.io/code-generator/cmd/client-gen/args
k8s.io/code-generator/cmd/client-gen/generators
k8s.io/code-generator/cmd/client-gen/generators/fake
@ -1759,7 +1759,7 @@ k8s.io/code-generator/cmd/lister-gen/args
k8s.io/code-generator/cmd/lister-gen/generators
k8s.io/code-generator/pkg/namer
k8s.io/code-generator/pkg/util
# k8s.io/component-base v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.3-k3s1
# k8s.io/component-base v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.4-k3s1
k8s.io/component-base/cli/flag
k8s.io/component-base/cli/globalflag
k8s.io/component-base/codec
@ -1777,10 +1777,10 @@ k8s.io/component-base/metrics/prometheus/workqueue
k8s.io/component-base/metrics/testutil
k8s.io/component-base/version
k8s.io/component-base/version/verflag
# k8s.io/cri-api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.3-k3s1
# k8s.io/cri-api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.4-k3s1
k8s.io/cri-api/pkg/apis
k8s.io/cri-api/pkg/apis/runtime/v1alpha2
# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.3-k3s1
# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.4-k3s1
k8s.io/csi-translation-lib
k8s.io/csi-translation-lib/plugins
# k8s.io/gengo v0.0.0-20200114144118-36b2048a9120
@ -1795,7 +1795,7 @@ k8s.io/gengo/types
k8s.io/heapster/metrics/api/v1/types
# k8s.io/klog v1.0.0
k8s.io/klog
# k8s.io/kube-aggregator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.3-k3s1
# k8s.io/kube-aggregator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.4-k3s1
k8s.io/kube-aggregator/pkg/apis/apiregistration
k8s.io/kube-aggregator/pkg/apis/apiregistration/install
k8s.io/kube-aggregator/pkg/apis/apiregistration/v1
@ -1823,7 +1823,7 @@ k8s.io/kube-aggregator/pkg/controllers/status
k8s.io/kube-aggregator/pkg/registry/apiservice
k8s.io/kube-aggregator/pkg/registry/apiservice/etcd
k8s.io/kube-aggregator/pkg/registry/apiservice/rest
# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.3-k3s1
# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.4-k3s1
k8s.io/kube-controller-manager/config/v1alpha1
# k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6
k8s.io/kube-openapi/pkg/aggregator
@ -1834,14 +1834,14 @@ k8s.io/kube-openapi/pkg/schemaconv
k8s.io/kube-openapi/pkg/util
k8s.io/kube-openapi/pkg/util/proto
k8s.io/kube-openapi/pkg/util/proto/validation
# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.3-k3s1
# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.4-k3s1
k8s.io/kube-proxy/config/v1alpha1
# k8s.io/kube-scheduler v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.3-k3s1
# k8s.io/kube-scheduler v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.4-k3s1
k8s.io/kube-scheduler/config/v1
k8s.io/kube-scheduler/config/v1alpha1
k8s.io/kube-scheduler/config/v1alpha2
k8s.io/kube-scheduler/extender/v1
# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.3-k3s1
# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.4-k3s1
k8s.io/kubectl/pkg/apps
k8s.io/kubectl/pkg/cmd
k8s.io/kubectl/pkg/cmd/annotate
@ -1916,11 +1916,11 @@ k8s.io/kubectl/pkg/util/storage
k8s.io/kubectl/pkg/util/templates
k8s.io/kubectl/pkg/util/term
k8s.io/kubectl/pkg/validation
# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.3-k3s1
# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.4-k3s1
k8s.io/kubelet/config/v1beta1
k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1
k8s.io/kubelet/pkg/apis/pluginregistration/v1
# k8s.io/kubernetes v1.18.0 => github.com/rancher/kubernetes v1.18.3-k3s1
# k8s.io/kubernetes v1.18.0 => github.com/rancher/kubernetes v1.18.4-k3s1
k8s.io/kubernetes/cmd/cloud-controller-manager/app
k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config
k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config/scheme
@ -2664,7 +2664,7 @@ k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear
k8s.io/kubernetes/third_party/forked/gonum/graph/simple
k8s.io/kubernetes/third_party/forked/gonum/graph/traverse
k8s.io/kubernetes/third_party/forked/ipvs
# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.3-k3s1
# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.4-k3s1
k8s.io/legacy-cloud-providers/aws
k8s.io/legacy-cloud-providers/azure
k8s.io/legacy-cloud-providers/azure/auth
@ -2695,7 +2695,7 @@ k8s.io/legacy-cloud-providers/openstack
k8s.io/legacy-cloud-providers/vsphere
k8s.io/legacy-cloud-providers/vsphere/vclib
k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers
# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.3-k3s1
# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.4-k3s1
k8s.io/metrics/pkg/apis/custom_metrics
k8s.io/metrics/pkg/apis/custom_metrics/v1beta1
k8s.io/metrics/pkg/apis/custom_metrics/v1beta2