update master to 1.20.4

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
Authored by galal-hussein on 2021-02-22 22:08:19 +02:00; committed by Brad Davidson
parent f9fdb94df2
commit fad2a046c3
49 changed files with 783 additions and 492 deletions

go.mod (58 changed lines)

@@ -32,34 +32,34 @@ replace (
google.golang.org/genproto => google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63
google.golang.org/grpc => google.golang.org/grpc v1.27.1
gopkg.in/square/go-jose.v2 => gopkg.in/square/go-jose.v2 v2.2.2
k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.20.2-k3s1
k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.20.2-k3s1
k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.20.2-k3s1
k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.20.2-k3s1
k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.20.2-k3s1
k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.20.2-k3s1
k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.20.2-k3s1
k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.20.2-k3s1
k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.20.2-k3s1
k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.20.2-k3s1
k8s.io/component-helpers => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.20.2-k3s1
k8s.io/controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.20.2-k3s1
k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.20.2-k3s1
k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.20.2-k3s1
k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.20.2-k3s1
k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.20.2-k3s1
k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.20.2-k3s1
k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.20.2-k3s1
k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.20.2-k3s1
k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.20.2-k3s1
k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.20.2-k3s1
k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.20.2-k3s1
k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.20.2-k3s1
k8s.io/mount-utils => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.20.2-k3s1
k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.20.2-k3s1
k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.20.2-k3s1
k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.20.2-k3s1
k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.20.2-k3s1
k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.20.4-k3s1
k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.20.4-k3s1
k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.20.4-k3s1
k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.20.4-k3s1
k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.20.4-k3s1
k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.20.4-k3s1
k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.20.4-k3s1
k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.20.4-k3s1
k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.20.4-k3s1
k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.20.4-k3s1
k8s.io/component-helpers => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.20.4-k3s1
k8s.io/controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.20.4-k3s1
k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.20.4-k3s1
k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.20.4-k3s1
k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.20.4-k3s1
k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.20.4-k3s1
k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.20.4-k3s1
k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.20.4-k3s1
k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.20.4-k3s1
k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.20.4-k3s1
k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.20.4-k3s1
k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.20.4-k3s1
k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.20.4-k3s1
k8s.io/mount-utils => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.20.4-k3s1
k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.20.4-k3s1
k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.20.4-k3s1
k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.20.4-k3s1
k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.20.4-k3s1
mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34
)
@@ -120,6 +120,6 @@ require (
k8s.io/cri-api v0.19.0
k8s.io/klog v1.0.0
k8s.io/kubectl v0.0.0
k8s.io/kubernetes v1.20.2
k8s.io/kubernetes v1.20.4
sigs.k8s.io/yaml v1.2.0
)
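The two go.mod hunks work as a pair: the require block asks for k8s.io modules (some at placeholder versions such as v0.0.0), and the replace block redirects every one of those module paths to the k3s-io/kubernetes fork at the pinned -k3s1 tag, so this update only has to move the tag from v1.20.2-k3s1 to v1.20.4-k3s1. A minimal, self-contained go.mod sketch of that mechanism (example.com/consumer is a made-up module path):

module example.com/consumer

go 1.15

require k8s.io/apimachinery v0.0.0

// Source files still import k8s.io/apimachinery, but the build fetches
// the fork's staging copy at the pinned tag instead of the upstream module.
replace k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.20.4-k3s1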

go.sum (102 changed lines)

@@ -388,8 +388,8 @@ github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/cadvisor v0.38.6 h1:5vu8NaOqsBKF6wOxBLeDPD7hcmxfg/4I5NZGYXK7gIo=
github.com/google/cadvisor v0.38.6/go.mod h1:1OFB9sOOMkBdUBGCO/1SArawTnDscgMzTodacVDe8mA=
github.com/google/cadvisor v0.38.7 h1:ZWyUz+23k1PRmEA+yrnDGtEC6IuU4Vc6439x2NQLHnA=
github.com/google/cadvisor v0.38.7/go.mod h1:1OFB9sOOMkBdUBGCO/1SArawTnDscgMzTodacVDe8mA=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -524,55 +524,55 @@ github.com/k3s-io/helm-controller v0.8.3 h1:GWxavyMz7Bw2ClxH5okkeOL8o5U6IBK7uauc
github.com/k3s-io/helm-controller v0.8.3/go.mod h1:nZP8FH3KZrNNUf5r+SwwiMR63HS6lxdHdpHijgPfF74=
github.com/k3s-io/kine v0.6.0 h1:4l7wjgCxb2oD+7Hyf3xIhkGd/6s1sXpRFdQiyy+7Ki8=
github.com/k3s-io/kine v0.6.0/go.mod h1:rzCs93+rQHZGOiewMd84PDrER92QeZ6eeHbWkfEy4+w=
github.com/k3s-io/kubernetes v1.20.2-k3s1 h1:z0Iqn2XPITD/0kCZ0h6IK/j/Gm3shiKoWgCgOqbdalo=
github.com/k3s-io/kubernetes v1.20.2-k3s1/go.mod h1:v2E/T5A+ikBt81n9HZ8VVns5IXSsMHP1ITIFuHY3d5s=
github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.20.2-k3s1 h1:HCMlr6q4zY6F1OQgTzBVYOpOzEzudxwd2IQDRZO7Vmw=
github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.20.2-k3s1/go.mod h1:WITCNA3qJC44dfLg1s/kdEQ51akGrwqOKkMnN7XoKXE=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.20.2-k3s1 h1:AouMiWpEVCur/ndBVPS16gN72lZXZsLNoXefJdsCU9c=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.20.2-k3s1/go.mod h1:A5mDfmENvCI0QodhAc0ipXV1aI5JUiZoJkA7BvHs9EQ=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.20.2-k3s1 h1:1MS9XHHAVpRPnFogBpz5zxY1eV3h5paVO9esekmNoDI=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.20.2-k3s1/go.mod h1:0y+U/8BmQoVgMGE+0/gge588aGi4gmbN3lxoCG+3ADo=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.20.2-k3s1 h1:XtapVXr6neBNhf1bajvh9MHzfJkD7Od4i+0mOaxpxpc=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.20.2-k3s1/go.mod h1:NsoqXeT+WOthrid9HxmbcdWoodnkGiwyPN2n3Xl4mzk=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.20.2-k3s1 h1:hTpT+J4KeDqah93ZEYxMQlyTlDjljC9n8ll5wnU9Z7s=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.20.2-k3s1/go.mod h1:nlZrhzrCcFv6W/jOigxyAFPNDg3aMkPDT2W62PpcGWI=
github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.20.2-k3s1 h1:80VWRcsuFD6YqfM2JW8sPeHLrH5C+AVvxP+b9lN9mFE=
github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.20.2-k3s1/go.mod h1:aYCqdW6DxKJzKkgoUBLWHRy9L2PEEMBfHfl79/Dw3vU=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.20.2-k3s1 h1:p3KZun8y8+SDFgsd0mAjxY2JIHCgdtUXFsPdQF/MTqc=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.20.2-k3s1/go.mod h1:PHIzADQFbVKfpb2K1IxHnNelXeeDHkXOR5Iz1E7OuUk=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.20.2-k3s1 h1:vZJVLIMCmTkK7QwS8a+IDMN9rHVYXs6ihcABr6t9ms0=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.20.2-k3s1/go.mod h1:spaI5+5c+1VReIDh7pGGs8ngDc3IoHwmTF8X+fTrP08=
github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.20.2-k3s1 h1:Jt5+rjXlT7vJLNN+6UwoxtHUTC2BLKQbjbGtIygeyx8=
github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.20.2-k3s1/go.mod h1:2R6qHsMnv/qydEgHBJ0z6r44uWkFoHzV5/GWbFxqwic=
github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.20.2-k3s1 h1:gxO0jPyEb0n8kPBQvBCv2tqt+EElnCHJ+QH7mbS/8o4=
github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.20.2-k3s1/go.mod h1:8GInE8yyzLYFslW12FRnTl/9YkmR9f/GGzCYu8ANHCQ=
github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.20.2-k3s1 h1:8w9wN1xXWXb7q4tpL/vuJpwtrfHTnkqb4HDz2EITdmY=
github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.20.2-k3s1/go.mod h1:pw+OH968rZrCvNZo0Eer3HGuPtKWS0dYbGot9aZ64Y4=
github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.20.2-k3s1 h1:wAvb3TJOsHkOdt++dSwvXXA77FdxZCV+0tXe+Hw4Baw=
github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.20.2-k3s1/go.mod h1:9QKLqDYjrnFJFS+rSO1jIH00jff39O2VGuZFkAUZPuM=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.20.2-k3s1 h1:iFipsOSnXlWTMipEeexwd4ZK5ZlyhIgKhRrti9YRm9g=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.20.2-k3s1/go.mod h1:4xhnGOFiv9lkvENieScdSfcQHzXH9DR6QZRrCH0amMo=
github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.20.2-k3s1 h1:/IZqL+k7UW4ryPn0ggxj0WEXcdrgK0LcQdh061pyQqA=
github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.20.2-k3s1/go.mod h1:Zicsc4aFIp+w3G/peRJVi5s2aJEZ9jQfGiG7ozhTXM4=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.20.2-k3s1 h1:qdiL8+94fQ4Eyq0YTVIRyMZUIm8JqHKEy/6jIgdKAMw=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.20.2-k3s1/go.mod h1:tQfIsKxDtV4B9JCUmG6Aum06bboy50wSjeAQvQ3rw2E=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.20.2-k3s1 h1:vfDSMrzYZX4YkRApgdu21I97fGP4jw3ksd0kLaLtU20=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.20.2-k3s1/go.mod h1:wwy8f7/wn8nH5uZq1RrbJmZoYRoFicaqndxs8vbsALA=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.20.2-k3s1 h1:hTHrnFN62BHxhwmZjixFK6+LcUj/M5HNbvENofAn+jY=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.20.2-k3s1/go.mod h1:PWMBqO9xuXWJS8REJ8QWiouJzbiOwVVVT81ZTwYb2Nk=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.20.2-k3s1 h1:ac3hDcDQxk+wiiwrLC6fKEOrQk8Q/i1/LKpGaOTeGGY=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.20.2-k3s1/go.mod h1:KGBnJPnA0KkPNM8DFYa7IrEslOHOKUqewL+USqnG6fo=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.20.2-k3s1 h1:VZIvrlDdOUpexNvxrMtQpjEXK79o5+8iclZi/0Sxwo8=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.20.2-k3s1/go.mod h1:34qOOg9yrR23GRMMunyDE4ngDYnz6Q7/T607q/qWnfM=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.20.2-k3s1 h1:ItXuGAfFESwrKHiktE4wvjkqMniZB8AvskyNSWrpDiw=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.20.2-k3s1/go.mod h1:B80sNAi0WQAEMuil1AWJk58uBTWbeaB2rpalid8bwVA=
github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.20.2-k3s1 h1:0qlQuZr5A6izAULoQbReMXCYg5zBu3qcTr8WC4FSvHg=
github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.20.2-k3s1/go.mod h1:Ar3mk1+4I10WJg8SwpYKlK7hzsa9BPd8W0Stzxr90Q0=
github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.20.2-k3s1 h1:D7rQ4q2n9iXIP5YRnU2QDgs9gsTL+qiG68qAUMRNONw=
github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.20.2-k3s1/go.mod h1:L4m/Vfo6CeUk2HBd8mEcYDLksPqbYuu5Nn+DKU2qvUQ=
github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.20.2-k3s1 h1:90py2hYmmEcBcsAprsEh+ANhPcqEh4T4/iHO38yull8=
github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.20.2-k3s1/go.mod h1:mFeSjsWvLj55xUpwltalvolz49izW7J0N5RfoIHrKKY=
github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.20.2-k3s1/go.mod h1:bbMx+Jiwi0GviI8UoqNJ5fpjNfCYBbu8plusmJAVvck=
github.com/k3s-io/kubernetes v1.20.4-k3s1 h1:OyOgPbVcxleCmW9IdCh/+BB2jNXkbhsKug229LLpf/g=
github.com/k3s-io/kubernetes v1.20.4-k3s1/go.mod h1:5oh+vhVyWep2o+IH61i3aU4e/Q77Yt96tcKwYOpVbvk=
github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.20.4-k3s1 h1:AehOKU0x0FzdJ3AmUmltwVF/8f00XI/o2MszP0zC1wY=
github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.20.4-k3s1/go.mod h1:WITCNA3qJC44dfLg1s/kdEQ51akGrwqOKkMnN7XoKXE=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.20.4-k3s1 h1:oaZhaNRFAiOE1MaATyqxZ80e8A2CC6huIwxz1lpiR4M=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.20.4-k3s1/go.mod h1:A5mDfmENvCI0QodhAc0ipXV1aI5JUiZoJkA7BvHs9EQ=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.20.4-k3s1 h1:lERSmRCemnIIlu0vjtu+SEiZ5AYqv+yJR/z8qC2FhO0=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.20.4-k3s1/go.mod h1:0y+U/8BmQoVgMGE+0/gge588aGi4gmbN3lxoCG+3ADo=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.20.4-k3s1 h1:GgcriagW6LmpKCteZibWHaXGi24M2iXsoRygYVYNiWs=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.20.4-k3s1/go.mod h1:NsoqXeT+WOthrid9HxmbcdWoodnkGiwyPN2n3Xl4mzk=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.20.4-k3s1 h1:Rmxz9K3Wynd+u78frxoOc6gd8PrL49BIqI2nOM/AY9c=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.20.4-k3s1/go.mod h1:nlZrhzrCcFv6W/jOigxyAFPNDg3aMkPDT2W62PpcGWI=
github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.20.4-k3s1 h1:1oORl2n3SKiTMxiCjd09d+ddk928VDsawlT7+Rtb1zQ=
github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.20.4-k3s1/go.mod h1:aYCqdW6DxKJzKkgoUBLWHRy9L2PEEMBfHfl79/Dw3vU=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.20.4-k3s1 h1:OittzYvKc4nRPDq1RWqGNLrMkE+sCMbX69DZjjJ/WhU=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.20.4-k3s1/go.mod h1:PHIzADQFbVKfpb2K1IxHnNelXeeDHkXOR5Iz1E7OuUk=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.20.4-k3s1 h1:VuZ5otj40K4b7MPQfzxe/1XFFpZSHLG5Bd5cT5e6OfE=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.20.4-k3s1/go.mod h1:spaI5+5c+1VReIDh7pGGs8ngDc3IoHwmTF8X+fTrP08=
github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.20.4-k3s1 h1:n8fmsMGar3nkOjbRxVJzTbXVcoz/rl5V0b/V7SLfqAk=
github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.20.4-k3s1/go.mod h1:2R6qHsMnv/qydEgHBJ0z6r44uWkFoHzV5/GWbFxqwic=
github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.20.4-k3s1 h1:KIPyYGs12L7bp+ua7n1Bj4sbkhkBMCT00MTLeY/I8uM=
github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.20.4-k3s1/go.mod h1:8GInE8yyzLYFslW12FRnTl/9YkmR9f/GGzCYu8ANHCQ=
github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.20.4-k3s1 h1:tYda2GG3lYaOfqcBRKWQ1Xhbyvm+1jTPcbO+dqxq1T0=
github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.20.4-k3s1/go.mod h1:pw+OH968rZrCvNZo0Eer3HGuPtKWS0dYbGot9aZ64Y4=
github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.20.4-k3s1 h1:dRo5U/jWsCvSnBV1h/4gfK2kkev87Wp2kW2f/UDQibg=
github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.20.4-k3s1/go.mod h1:9QKLqDYjrnFJFS+rSO1jIH00jff39O2VGuZFkAUZPuM=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.20.4-k3s1 h1:sHRjX0aMOL6x7cKuRCGccuPHdYCiCAWkv5f8z2YXHnM=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.20.4-k3s1/go.mod h1:4xhnGOFiv9lkvENieScdSfcQHzXH9DR6QZRrCH0amMo=
github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.20.4-k3s1 h1:oHWMogBFxp6XVN62T7SOEhDyuF7xKnJGrLwlNqCuphQ=
github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.20.4-k3s1/go.mod h1:Zicsc4aFIp+w3G/peRJVi5s2aJEZ9jQfGiG7ozhTXM4=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.20.4-k3s1 h1:WwgcRlVX57Zcg/FeJdGrDBAqDgMXqCveb/yjsY9biNw=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.20.4-k3s1/go.mod h1:tQfIsKxDtV4B9JCUmG6Aum06bboy50wSjeAQvQ3rw2E=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.20.4-k3s1 h1:YCM+5NheOXbvvkig/P1BKwtr+YhYfOp/9GQIlr1mBMA=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.20.4-k3s1/go.mod h1:wwy8f7/wn8nH5uZq1RrbJmZoYRoFicaqndxs8vbsALA=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.20.4-k3s1 h1:hB56k3Y+vBCXi1IwJqXMmiSAZAPLMTs0bxRtR7q+s4A=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.20.4-k3s1/go.mod h1:PWMBqO9xuXWJS8REJ8QWiouJzbiOwVVVT81ZTwYb2Nk=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.20.4-k3s1 h1:wJQQ6C80njjwS9jvkaesUuzZcg5mztZ7P+AQuCTnFOQ=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.20.4-k3s1/go.mod h1:KGBnJPnA0KkPNM8DFYa7IrEslOHOKUqewL+USqnG6fo=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.20.4-k3s1 h1:3/tXi0JQNiAHG5Ooo0Umjik0/KAAWDCdX84EJZoCsCk=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.20.4-k3s1/go.mod h1:34qOOg9yrR23GRMMunyDE4ngDYnz6Q7/T607q/qWnfM=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.20.4-k3s1 h1:mDKk81OPp2y7QHDn9dOX2y6SEnUUB7dZLGfHiWbnSMg=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.20.4-k3s1/go.mod h1:B80sNAi0WQAEMuil1AWJk58uBTWbeaB2rpalid8bwVA=
github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.20.4-k3s1 h1:xcZKUX3FG7B4aOqsk62A1SKNztwFcOnPPxa2OR6xBh4=
github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.20.4-k3s1/go.mod h1:Ar3mk1+4I10WJg8SwpYKlK7hzsa9BPd8W0Stzxr90Q0=
github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.20.4-k3s1 h1:8QCwYZezUGboDqM7cCA9u7Tci/bsoePLZQWRTqEF9IY=
github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.20.4-k3s1/go.mod h1:L4m/Vfo6CeUk2HBd8mEcYDLksPqbYuu5Nn+DKU2qvUQ=
github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.20.4-k3s1 h1:3g4v0celZa2AtEyvnko5NpGcpbYzRQF39tbaqdmvztk=
github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.20.4-k3s1/go.mod h1:mFeSjsWvLj55xUpwltalvolz49izW7J0N5RfoIHrKKY=
github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.20.4-k3s1/go.mod h1:bbMx+Jiwi0GviI8UoqNJ5fpjNfCYBbu8plusmJAVvck=
github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw=
github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=


@@ -527,6 +527,26 @@ func (i *RealFsInfo) GetDeviceInfoByFsUUID(uuid string) (*DeviceInfo, error) {
return &DeviceInfo{deviceName, p.major, p.minor}, nil
}
func (i *RealFsInfo) mountInfoFromDir(dir string) (*mount.MountInfo, bool) {
mount, found := i.mounts[dir]
// try the parent dir if not found until we reach the root dir
// this is an issue on btrfs systems where the directory is not
// the subvolume
for !found {
pathdir, _ := filepath.Split(dir)
// break when we reach root
if pathdir == "/" {
mount, found = i.mounts["/"]
break
}
// trim "/" from the new parent path otherwise the next possible
// filepath.Split in the loop will not split the string any further
dir = strings.TrimSuffix(pathdir, "/")
mount, found = i.mounts[dir]
}
return &mount, found
}
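The new mountInfoFromDir helper factors out the parent-directory walk so GetDirFsDevice (next hunk) can reuse it: when a directory has no entry in the mount table, the lookup climbs toward "/" until it finds one, which handles btrfs systems where the queried directory sits below the subvolume's mount point. A standalone sketch of the same walk over a plain map (lookupMount and the sample entries are illustrative):

package main

import (
    "fmt"
    "path/filepath"
    "strings"
)

func lookupMount(mounts map[string]string, dir string) (string, bool) {
    m, found := mounts[dir]
    for !found {
        parent, _ := filepath.Split(dir)
        if parent == "/" {
            m, found = mounts["/"]
            break
        }
        // Trim the trailing "/" so the next filepath.Split strips another component.
        dir = strings.TrimSuffix(parent, "/")
        m, found = mounts[dir]
    }
    return m, found
}

func main() {
    mounts := map[string]string{"/": "sda1", "/var/lib": "btrfs-subvolume"}
    fmt.Println(lookupMount(mounts, "/var/lib/kubelet/pods")) // btrfs-subvolume true
}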
func (i *RealFsInfo) GetDirFsDevice(dir string) (*DeviceInfo, error) {
buf := new(syscall.Stat_t)
err := syscall.Stat(dir, buf)
@@ -543,24 +563,9 @@ func (i *RealFsInfo) GetDirFsDevice(dir string) (*DeviceInfo, error) {
}
}
mount, found := i.mounts[dir]
// try the parent dir if not found until we reach the root dir
// this is an issue on btrfs systems where the directory is not
// the subvolume
for !found {
pathdir, _ := filepath.Split(dir)
// break when we reach root
if pathdir == "/" {
break
}
// trim "/" from the new parent path otherwise the next possible
// filepath.Split in the loop will not split the string any further
dir = strings.TrimSuffix(pathdir, "/")
mount, found = i.mounts[dir]
}
mount, found := i.mountInfoFromDir(dir)
if found && mount.FsType == "btrfs" && mount.Major == 0 && strings.HasPrefix(mount.Source, "/dev/") {
major, minor, err := getBtrfsMajorMinorIds(&mount)
major, minor, err := getBtrfsMajorMinorIds(mount)
if err != nil {
klog.Warningf("%s", err)
} else {


@@ -226,6 +226,10 @@ func (s *DelegatingAuthenticationOptions) WithClientTimeout(timeout time.Duratio
}
func (s *DelegatingAuthenticationOptions) Validate() []error {
if s == nil {
return nil
}
allErrors := []error{}
allErrors = append(allErrors, s.RequestHeader.Validate()...)


@@ -104,8 +104,11 @@ func (s *DelegatingAuthorizationOptions) WithCustomRetryBackoff(backoff wait.Bac
}
func (s *DelegatingAuthorizationOptions) Validate() []error {
allErrors := []error{}
if s == nil {
return nil
}
allErrors := []error{}
if s.WebhookRetryBackoff != nil && s.WebhookRetryBackoff.Steps <= 0 {
allErrors = append(allErrors, fmt.Errorf("number of webhook retry attempts must be greater than 1, but is: %d", s.WebhookRetryBackoff.Steps))
}
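This hunk and the authentication-options hunk above make the same fix: the s == nil guard is hoisted to the first statement of Validate. Go permits calling a method through a nil pointer receiver, but any field access then panics, so the guard must run before allErrors is built or s.WebhookRetryBackoff is touched. A minimal sketch of the pattern (Options is an illustrative stand-in):

package main

import "fmt"

type Options struct{ Steps int }

func (o *Options) Validate() []error {
    if o == nil {
        return nil // the method call itself is legal on a nil *Options; field access is not
    }
    allErrors := []error{}
    if o.Steps <= 0 {
        allErrors = append(allErrors, fmt.Errorf("steps must be positive, got %d", o.Steps))
    }
    return allErrors
}

func main() {
    var o *Options
    fmt.Println(o.Validate()) // prints [] with no panic, thanks to the guard
}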


@@ -37,10 +37,8 @@ var (
&compbasemetrics.HistogramOpts{
Name: "etcd_request_duration_seconds",
Help: "Etcd request latency in seconds for each operation and object type.",
// Keeping it similar to the buckets used by the apiserver_request_duration_seconds metric so that
// api latency and etcd latency can be more comparable side by side.
Buckets: []float64{.005, .01, .025, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.7,
0.8, 0.9, 1.0, 1.25, 1.5, 1.75, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5, 6, 7, 8, 9, 10, 15, 20, 25, 30, 40, 50, 60},
// Etcd request latency in seconds for each operation and object type.
Buckets: []float64{0.005, 0.025, 0.1, 0.25, 0.5, 1.0, 2.0, 4.0, 15.0, 30.0, 60.0},
StabilityLevel: compbasemetrics.ALPHA,
},
[]string{"operation", "type"},


@@ -104,14 +104,14 @@ func (w *WebhookTokenAuthenticator) AuthenticateToken(ctx context.Context, token
}
var (
result *authenticationv1.TokenReview
err error
auds authenticator.Audiences
)
webhook.WithExponentialBackoff(ctx, w.retryBackoff, func() error {
result, err = w.tokenReview.Create(ctx, r, metav1.CreateOptions{})
return err
}, webhook.DefaultShouldRetry)
if err != nil {
// WithExponentialBackoff will return tokenreview create error (tokenReviewErr) if any.
if err := webhook.WithExponentialBackoff(ctx, w.retryBackoff, func() error {
var tokenReviewErr error
result, tokenReviewErr = w.tokenReview.Create(ctx, r, metav1.CreateOptions{})
return tokenReviewErr
}, webhook.DefaultShouldRetry); err != nil {
// An error here indicates bad configuration or an outage. Log for debugging.
klog.Errorf("Failed to make webhook authenticator request: %v", err)
return nil, false, err
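The rewrite above stops sharing a single err variable between the retry closure and the caller: the closure reports its own tokenReviewErr, and the caller acts on the error value actually returned by webhook.WithExponentialBackoff. A generic, self-contained sketch of that capture pattern (withRetry is an illustrative stand-in for the k8s backoff helper):

package main

import (
    "errors"
    "fmt"
)

// withRetry calls fn up to attempts times, returning nil on the first success
// or the last error otherwise.
func withRetry(attempts int, fn func() error) error {
    var err error
    for i := 0; i < attempts; i++ {
        if err = fn(); err == nil {
            return nil
        }
    }
    return err
}

func main() {
    var result string
    calls := 0
    if err := withRetry(3, func() error {
        calls++
        if calls < 3 {
            return errors.New("transient failure")
        }
        result = "token review created" // the result escapes via the closure
        return nil
    }); err != nil {
        fmt.Println("giving up:", err)
        return
    }
    fmt.Println(result, "after", calls, "calls") // token review created after 3 calls
}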


@@ -192,19 +192,17 @@ func (w *WebhookAuthorizer) Authorize(ctx context.Context, attr authorizer.Attri
if entry, ok := w.responseCache.Get(string(key)); ok {
r.Status = entry.(authorizationv1.SubjectAccessReviewStatus)
} else {
var (
result *authorizationv1.SubjectAccessReview
err error
)
webhook.WithExponentialBackoff(ctx, w.retryBackoff, func() error {
result, err = w.subjectAccessReview.Create(ctx, r, metav1.CreateOptions{})
return err
}, webhook.DefaultShouldRetry)
if err != nil {
// An error here indicates bad configuration or an outage. Log for debugging.
var result *authorizationv1.SubjectAccessReview
// WithExponentialBackoff will return SAR create error (sarErr) if any.
if err := webhook.WithExponentialBackoff(ctx, w.retryBackoff, func() error {
var sarErr error
result, sarErr = w.subjectAccessReview.Create(ctx, r, metav1.CreateOptions{})
return sarErr
}, webhook.DefaultShouldRetry); err != nil {
klog.Errorf("Failed to make webhook authorizer request: %v", err)
return w.decisionOnError, "", err
}
r.Status = result.Status
if shouldCache(attr) {
if r.Status.Allowed {


@@ -3,8 +3,8 @@ package version
var (
gitMajor = "1"
gitMinor = "20"
gitVersion = "v1.20.2-k3s1"
gitCommit = "426bcc7128ac615e92181e084eafdef3b0206d76"
gitVersion = "v1.20.4-k3s1"
gitCommit = "dbe940a8b30b544ac26d7947bb7dc7f62a5f484d"
gitTreeState = "clean"
buildDate = "2021-01-13T19:38:13Z"
buildDate = "2021-02-18T20:50:53Z"
)


@@ -3,8 +3,8 @@ package version
var (
gitMajor = "1"
gitMinor = "20"
gitVersion = "v1.20.2-k3s1"
gitCommit = "426bcc7128ac615e92181e084eafdef3b0206d76"
gitVersion = "v1.20.4-k3s1"
gitCommit = "dbe940a8b30b544ac26d7947bb7dc7f62a5f484d"
gitTreeState = "clean"
buildDate = "2021-01-13T19:38:13Z"
buildDate = "2021-02-18T20:50:53Z"
)


@@ -46,6 +46,7 @@ go_test(
"azure_file_test.go",
"gce_pd_test.go",
"in_tree_volume_test.go",
"openstack_cinder_test.go",
"vsphere_volume_test.go",
],
embed = [":go_default_library"],


@@ -98,7 +98,7 @@ func (t *awsElasticBlockStoreCSITranslator) TranslateInTreeInlineVolumeToCSI(vol
ObjectMeta: metav1.ObjectMeta{
// Must be unique per disk as it is used as the unique part of the
// staging path
Name: fmt.Sprintf("%s-%s", AWSEBSDriverName, ebsSource.VolumeID),
Name: fmt.Sprintf("%s-%s", AWSEBSDriverName, volumeHandle),
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{


@@ -37,11 +37,11 @@ const (
volumeIDTemplate = "%s#%s#%s#%s"
// Parameter names defined in azure file CSI driver, refer to
// https://github.com/kubernetes-sigs/azurefile-csi-driver/blob/master/docs/driver-parameters.md
azureFileShareName = "shareName"
secretNameTemplate = "azure-storage-account-%s-secret"
defaultSecretNamespace = "default"
shareNameField = "sharename"
secretNameField = "secretname"
secretNamespaceField = "secretnamespace"
secretNameTemplate = "azure-storage-account-%s-secret"
defaultSecretNamespace = "default"
resourceGroupAnnotation = "kubernetes.io/azure-file-resource-group"
)
@@ -90,7 +90,7 @@ func (t *azureFileCSITranslator) TranslateInTreeInlineVolumeToCSI(volume *v1.Vol
Driver: AzureFileDriverName,
VolumeHandle: fmt.Sprintf(volumeIDTemplate, "", accountName, azureSource.ShareName, ""),
ReadOnly: azureSource.ReadOnly,
VolumeAttributes: map[string]string{azureFileShareName: azureSource.ShareName},
VolumeAttributes: map[string]string{shareNameField: azureSource.ShareName},
NodeStageSecretRef: &v1.SecretReference{
Name: azureSource.SecretName,
Namespace: defaultSecretNamespace,
@@ -135,7 +135,7 @@ func (t *azureFileCSITranslator) TranslateInTreePVToCSI(pv *v1.PersistentVolume)
Namespace: defaultSecretNamespace,
},
ReadOnly: azureSource.ReadOnly,
VolumeAttributes: map[string]string{azureFileShareName: azureSource.ShareName},
VolumeAttributes: map[string]string{shareNameField: azureSource.ShareName},
VolumeHandle: volumeID,
}
)
@@ -163,31 +163,48 @@ func (t *azureFileCSITranslator) TranslateCSIPVToInTree(pv *v1.PersistentVolume)
ReadOnly: csiSource.ReadOnly,
}
for k, v := range csiSource.VolumeAttributes {
switch strings.ToLower(k) {
case shareNameField:
azureSource.ShareName = v
case secretNameField:
azureSource.SecretName = v
case secretNamespaceField:
ns := v
azureSource.SecretNamespace = &ns
}
}
resourceGroup := ""
if csiSource.NodeStageSecretRef != nil && csiSource.NodeStageSecretRef.Name != "" {
azureSource.SecretName = csiSource.NodeStageSecretRef.Name
azureSource.SecretNamespace = &csiSource.NodeStageSecretRef.Namespace
if csiSource.VolumeAttributes != nil {
if shareName, ok := csiSource.VolumeAttributes[azureFileShareName]; ok {
azureSource.ShareName = shareName
}
}
} else {
}
if azureSource.ShareName == "" || azureSource.SecretName == "" {
rg, storageAccount, fileShareName, _, err := getFileShareInfo(csiSource.VolumeHandle)
if err != nil {
return nil, err
}
azureSource.ShareName = fileShareName
azureSource.SecretName = fmt.Sprintf(secretNameTemplate, storageAccount)
if azureSource.ShareName == "" {
azureSource.ShareName = fileShareName
}
if azureSource.SecretName == "" {
azureSource.SecretName = fmt.Sprintf(secretNameTemplate, storageAccount)
}
resourceGroup = rg
}
if azureSource.SecretNamespace == nil {
ns := defaultSecretNamespace
azureSource.SecretNamespace = &ns
}
pv.Spec.CSI = nil
pv.Spec.AzureFile = azureSource
if pv.ObjectMeta.Annotations == nil {
pv.ObjectMeta.Annotations = map[string]string{}
}
if resourceGroup != "" {
if pv.ObjectMeta.Annotations == nil {
pv.ObjectMeta.Annotations = map[string]string{}
}
pv.ObjectMeta.Annotations[resourceGroupAnnotation] = resourceGroup
}


@@ -18,6 +18,7 @@ package plugins
import (
"fmt"
"strings"
v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
@@ -45,6 +46,30 @@ func NewOpenStackCinderCSITranslator() InTreePlugin {
// TranslateInTreeStorageClassParametersToCSI translates InTree Cinder storage class parameters to CSI storage class
func (t *osCinderCSITranslator) TranslateInTreeStorageClassToCSI(sc *storage.StorageClass) (*storage.StorageClass, error) {
var (
params = map[string]string{}
)
for k, v := range sc.Parameters {
switch strings.ToLower(k) {
case fsTypeKey:
params[csiFsTypeKey] = v
default:
// All other parameters are supported by the CSI driver.
// This includes also "availability", therefore do not translate it to sc.AllowedTopologies
params[k] = v
}
}
if len(sc.AllowedTopologies) > 0 {
newTopologies, err := translateAllowedTopologies(sc.AllowedTopologies, CinderTopologyKey)
if err != nil {
return nil, fmt.Errorf("failed translating allowed topologies: %v", err)
}
sc.AllowedTopologies = newTopologies
}
sc.Parameters = params
return sc, nil
}


@@ -54,20 +54,20 @@ func (o *DeprecatedOptions) AddFlags(fs *pflag.FlagSet, cfg *kubeschedulerconfig
fs.StringVar(&o.PolicyConfigMapNamespace, "policy-configmap-namespace", o.PolicyConfigMapNamespace, "DEPRECATED: the namespace where policy ConfigMap is located. The kube-system namespace will be used if this is not provided or is empty. Note: The scheduler will fail if this is combined with Plugin configs")
fs.BoolVar(&o.UseLegacyPolicyConfig, "use-legacy-policy-config", o.UseLegacyPolicyConfig, "DEPRECATED: when set to true, scheduler will ignore policy ConfigMap and uses policy config file. Note: The scheduler will fail if this is combined with Plugin configs")
fs.BoolVar(&cfg.EnableProfiling, "profiling", cfg.EnableProfiling, "DEPRECATED: enable profiling via web interface host:port/debug/pprof/")
fs.BoolVar(&cfg.EnableContentionProfiling, "contention-profiling", cfg.EnableContentionProfiling, "DEPRECATED: enable lock contention profiling, if profiling is enabled")
fs.StringVar(&cfg.ClientConnection.Kubeconfig, "kubeconfig", cfg.ClientConnection.Kubeconfig, "DEPRECATED: path to kubeconfig file with authorization and master location information.")
fs.StringVar(&cfg.ClientConnection.ContentType, "kube-api-content-type", cfg.ClientConnection.ContentType, "DEPRECATED: content type of requests sent to apiserver.")
fs.Float32Var(&cfg.ClientConnection.QPS, "kube-api-qps", cfg.ClientConnection.QPS, "DEPRECATED: QPS to use while talking with kubernetes apiserver")
fs.Int32Var(&cfg.ClientConnection.Burst, "kube-api-burst", cfg.ClientConnection.Burst, "DEPRECATED: burst to use while talking with kubernetes apiserver")
fs.StringVar(&cfg.LeaderElection.ResourceNamespace, "lock-object-namespace", cfg.LeaderElection.ResourceNamespace, "DEPRECATED: define the namespace of the lock object. Will be removed in favor of leader-elect-resource-namespace.")
fs.StringVar(&cfg.LeaderElection.ResourceName, "lock-object-name", cfg.LeaderElection.ResourceName, "DEPRECATED: define the name of the lock object. Will be removed in favor of leader-elect-resource-name")
fs.BoolVar(&cfg.EnableProfiling, "profiling", cfg.EnableProfiling, "DEPRECATED: enable profiling via web interface host:port/debug/pprof/. This parameter is ignored if a config file is specified in --config.")
fs.BoolVar(&cfg.EnableContentionProfiling, "contention-profiling", cfg.EnableContentionProfiling, "DEPRECATED: enable lock contention profiling, if profiling is enabled. This parameter is ignored if a config file is specified in --config.")
fs.StringVar(&cfg.ClientConnection.Kubeconfig, "kubeconfig", cfg.ClientConnection.Kubeconfig, "DEPRECATED: path to kubeconfig file with authorization and master location information. This parameter is ignored if a config file is specified in --config.")
fs.StringVar(&cfg.ClientConnection.ContentType, "kube-api-content-type", cfg.ClientConnection.ContentType, "DEPRECATED: content type of requests sent to apiserver. This parameter is ignored if a config file is specified in --config.")
fs.Float32Var(&cfg.ClientConnection.QPS, "kube-api-qps", cfg.ClientConnection.QPS, "DEPRECATED: QPS to use while talking with kubernetes apiserver. This parameter is ignored if a config file is specified in --config.")
fs.Int32Var(&cfg.ClientConnection.Burst, "kube-api-burst", cfg.ClientConnection.Burst, "DEPRECATED: burst to use while talking with kubernetes apiserver. This parameter is ignored if a config file is specified in --config.")
fs.StringVar(&cfg.LeaderElection.ResourceNamespace, "lock-object-namespace", cfg.LeaderElection.ResourceNamespace, "DEPRECATED: define the namespace of the lock object. Will be removed in favor of leader-elect-resource-namespace. This parameter is ignored if a config file is specified in --config.")
fs.StringVar(&cfg.LeaderElection.ResourceName, "lock-object-name", cfg.LeaderElection.ResourceName, "DEPRECATED: define the name of the lock object. Will be removed in favor of leader-elect-resource-name. This parameter is ignored if a config file is specified in --config.")
fs.Int32Var(&o.HardPodAffinitySymmetricWeight, "hard-pod-affinity-symmetric-weight", o.HardPodAffinitySymmetricWeight,
"DEPRECATED: RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule corresponding "+
"to every RequiredDuringScheduling affinity rule. --hard-pod-affinity-symmetric-weight represents the weight of implicit PreferredDuringScheduling affinity rule. Must be in the range 0-100."+
"This option was moved to the policy configuration file")
fs.StringVar(&o.SchedulerName, "scheduler-name", o.SchedulerName, "DEPRECATED: name of the scheduler, used to select which pods will be processed by this scheduler, based on pod's \"spec.schedulerName\".")
"This parameter is ignored if a config file is specified in --config.")
fs.StringVar(&o.SchedulerName, "scheduler-name", o.SchedulerName, "DEPRECATED: name of the scheduler, used to select which pods will be processed by this scheduler, based on pod's \"spec.schedulerName\". This parameter is ignored if a config file is specified in --config.")
// MarkDeprecated hides the flag from the help. We don't want that:
// fs.MarkDeprecated("hard-pod-affinity-symmetric-weight", "This option was moved to the policy configuration file")
}


@@ -44,10 +44,10 @@ func (o *CombinedInsecureServingOptions) AddFlags(fs *pflag.FlagSet) {
return
}
fs.StringVar(&o.BindAddress, "address", o.BindAddress, "DEPRECATED: the IP address on which to listen for the --port port (set to 0.0.0.0 for all IPv4 interfaces and :: for all IPv6 interfaces). See --bind-address instead.")
fs.StringVar(&o.BindAddress, "address", o.BindAddress, "DEPRECATED: the IP address on which to listen for the --port port (set to 0.0.0.0 for all IPv4 interfaces and :: for all IPv6 interfaces). See --bind-address instead. This parameter is ignored if a config file is specified in --config.")
// MarkDeprecated hides the flag from the help. We don't want that:
// fs.MarkDeprecated("address", "see --bind-address instead.")
fs.IntVar(&o.BindPort, "port", o.BindPort, "DEPRECATED: the port on which to serve HTTP insecurely without authentication and authorization. If 0, don't serve plain HTTP at all. See --secure-port instead.")
fs.IntVar(&o.BindPort, "port", o.BindPort, "DEPRECATED: the port on which to serve HTTP insecurely without authentication and authorization. If 0, don't serve plain HTTP at all. See --secure-port instead. This parameter is ignored if a config file is specified in --config.")
// MarkDeprecated hides the flag from the help. We don't want that:
// fs.MarkDeprecated("port", "see --secure-port instead.")
}


@@ -151,12 +151,10 @@ func newDefaultComponentConfig() (*kubeschedulerconfig.KubeSchedulerConfiguratio
func (o *Options) Flags() (nfs cliflag.NamedFlagSets) {
fs := nfs.FlagSet("misc")
fs.StringVar(&o.ConfigFile, "config", o.ConfigFile, `The path to the configuration file. The following flags can overwrite fields in this file:
--address
--port
--use-legacy-policy-config
--policy-configmap
--algorithm-provider
--policy-config-file
--algorithm-provider`)
--policy-configmap
--policy-configmap-namespace`)
fs.StringVar(&o.WriteConfigTo, "write-config-to", o.WriteConfigTo, "If set, write the configuration values to this file and exit.")
fs.StringVar(&o.Master, "master", o.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig)")


@@ -471,12 +471,12 @@ func (dc *DisruptionController) getPdbForPod(pod *v1.Pod) *policy.PodDisruptionB
// IMPORTANT NOTE: the returned pods should NOT be modified.
func (dc *DisruptionController) getPodsForPdb(pdb *policy.PodDisruptionBudget) ([]*v1.Pod, error) {
sel, err := metav1.LabelSelectorAsSelector(pdb.Spec.Selector)
if sel.Empty() {
return []*v1.Pod{}, nil
}
if err != nil {
return []*v1.Pod{}, err
}
if sel.Empty() {
return []*v1.Pod{}, nil
}
pods, err := dc.podLister.Pods(pdb.Namespace).List(sel)
if err != nil {
return []*v1.Pod{}, err
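The reordering matters because metav1.LabelSelectorAsSelector returns a nil selector together with a non-nil error for an invalid selector, so calling sel.Empty() before checking err risks a nil dereference. A reduced sketch of the check-error-before-use ordering (parse stands in for the real conversion):

package main

import (
    "errors"
    "fmt"
)

type selector []string

func (s selector) empty() bool { return len(s) == 0 }

// parse mimics an API that returns (nil, err) on failure.
func parse(ok bool) (*selector, error) {
    if !ok {
        return nil, errors.New("invalid label selector")
    }
    return &selector{"app=web"}, nil
}

func main() {
    sel, err := parse(false)
    if err != nil { // checked first, so the nil sel below is never dereferenced
        fmt.Println(err)
        return
    }
    fmt.Println(sel.empty())
}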


@@ -393,6 +393,11 @@ func (gb *GraphBuilder) addDependentToOwners(n *node, owners []metav1.OwnerRefer
klog.V(2).Infof("node %s references an owner %s with coordinates that do not match the observed identity", n.identity, ownerNode.identity)
}
hasPotentiallyInvalidOwnerReference = true
} else if !ownerIsNamespaced && ownerNode.identity.Namespace != n.identity.Namespace && !ownerNode.isObserved() {
// the ownerNode is cluster-scoped and virtual, and does not match the child node's namespace.
// the owner could be a missing instance of a namespaced type incorrectly referenced by a cluster-scoped child (issue #98040).
// enqueue this child to attemptToDelete to verify parent references.
hasPotentiallyInvalidOwnerReference = true
}
}
}


@@ -391,7 +391,11 @@ func (adc *attachDetachController) populateActualStateOfWorld() error {
adc.addNodeToDswp(node, types.NodeName(node.Name))
}
}
return nil
err = adc.processVolumeAttachments()
if err != nil {
klog.Errorf("Failed to process volume attachments: %v", err)
}
return err
}
func (adc *attachDetachController) getNodeVolumeDevicePath(
@@ -461,7 +465,12 @@ func (adc *attachDetachController) populateDesiredStateOfWorld() error {
err)
continue
}
if adc.actualStateOfWorld.IsVolumeAttachedToNode(volumeName, nodeName) {
attachState := adc.actualStateOfWorld.GetAttachState(volumeName, nodeName)
if attachState == cache.AttachStateAttached {
klog.V(10).Infof("Volume %q is attached to node %q. Marking as attached in ActualStateOfWorld",
volumeName,
nodeName,
)
devicePath, err := adc.getNodeVolumeDevicePath(volumeName, nodeName)
if err != nil {
klog.Errorf("Failed to find device path: %v", err)
@@ -679,6 +688,67 @@ func (adc *attachDetachController) processVolumesInUse(
}
}
// Process Volume-Attachment objects.
// Should be called only after populating attached volumes in the ASW.
// For each VA object, this function checks if it is present in the ASW.
// If not, adds the volume to ASW as an "uncertain" attachment.
// In the reconciler, the logic checks if the volume is present in the DSW;
// if yes, the reconciler will attempt attach on the volume;
// if not (could be a dangling attachment), the reconciler will detach this volume.
func (adc *attachDetachController) processVolumeAttachments() error {
vas, err := adc.volumeAttachmentLister.List(labels.Everything())
if err != nil {
klog.Errorf("failed to list VolumeAttachment objects: %v", err)
return err
}
for _, va := range vas {
nodeName := types.NodeName(va.Spec.NodeName)
pvName := va.Spec.Source.PersistentVolumeName
if pvName == nil {
// Currently VA objects are created for CSI volumes only. nil pvName is unexpected, generate a warning
klog.Warningf("Skipping the va as its pvName is nil, va.Name: %q, nodeName: %q",
va.Name, nodeName)
continue
}
pv, err := adc.pvLister.Get(*pvName)
if err != nil {
klog.Errorf("Unable to lookup pv object for: %q, err: %v", *pvName, err)
continue
}
volumeSpec := volume.NewSpecFromPersistentVolume(pv, false)
plugin, err := adc.volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
if err != nil || plugin == nil {
// Currently VA objects are created for CSI volumes only. nil plugin is unexpected, generate a warning
klog.Warningf(
"Skipping processing the volume %q on nodeName: %q, no attacher interface found. err=%v",
*pvName,
nodeName,
err)
continue
}
volumeName, err := volumeutil.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
if err != nil {
klog.Errorf(
"Failed to find unique name for volume:%q, va.Name:%q, nodeName:%q: %v",
*pvName,
va.Name,
nodeName,
err)
continue
}
attachState := adc.actualStateOfWorld.GetAttachState(volumeName, nodeName)
if attachState == cache.AttachStateDetached {
klog.V(1).Infof("Marking volume attachment as uncertain as volume:%q (%q) is not attached (%v)",
volumeName, nodeName, attachState)
err = adc.actualStateOfWorld.MarkVolumeAsUncertain(volumeName, volumeSpec, nodeName)
if err != nil {
klog.Errorf("MarkVolumeAsUncertain failed to add the volume %q (%q) to ASW. err: %s", volumeName, nodeName, err)
}
}
}
return nil
}
var _ volume.VolumeHost = &attachDetachController{}
var _ volume.AttachDetachVolumeHost = &attachDetachController{}


@@ -96,10 +96,13 @@ type ActualStateOfWorld interface {
// nodes, the volume is also deleted.
DeleteVolumeNode(volumeName v1.UniqueVolumeName, nodeName types.NodeName)
// IsVolumeAttachedToNode returns true if the specified volume/node combo exists
// in the underlying store indicating the specified volume is attached to
// the specified node.
IsVolumeAttachedToNode(volumeName v1.UniqueVolumeName, nodeName types.NodeName) bool
// GetAttachState returns the attach state for the given volume-node
// combination.
// Returns AttachStateAttached if the specified volume/node combo exists in
// the underlying store indicating the specified volume is attached to the
// specified node, AttachStateDetached if the combo does not exist, or
// AttachStateUncertain if the attached state is marked as uncertain.
GetAttachState(volumeName v1.UniqueVolumeName, nodeName types.NodeName) AttachState
// GetAttachedVolumes generates and returns a list of volumes/node pairs
// reflecting which volumes might attached to which nodes based on the
@@ -153,6 +156,31 @@ type AttachedVolume struct {
DetachRequestedTime time.Time
}
// AttachState represents the attach state of a volume to a node known to the
// Actual State of World.
// This type is used as external representation of attach state (specifically
// as the return type of GetAttachState only); the state is represented
// differently in the internal cache implementation.
type AttachState int
const (
// AttachStateAttached represents the state in which the volume is attached to
// the node.
AttachStateAttached AttachState = iota
// AttachStateUncertain represents the state in which the Actual State of World
// does not know whether the volume is attached to the node.
AttachStateUncertain
// AttachStateDetached represents the state in which the volume is not
// attached to the node.
AttachStateDetached
)
func (s AttachState) String() string {
return []string{"Attached", "Uncertain", "Detached"}[s]
}
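AttachState is a plain iota enumeration whose String method indexes a slice of names; because the constants start at zero they line up with the slice positions, and fmt picks the name up automatically. A self-contained copy of the pattern (State mirrors AttachState here):

package main

import "fmt"

type State int

const (
    Attached State = iota // 0
    Uncertain             // 1
    Detached              // 2
)

func (s State) String() string {
    return []string{"Attached", "Uncertain", "Detached"}[s]
}

func main() {
    fmt.Println(Uncertain) // fmt calls String(), printing "Uncertain"
}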
// NewActualStateOfWorld returns a new instance of ActualStateOfWorld.
func NewActualStateOfWorld(volumePluginMgr *volume.VolumePluginMgr) ActualStateOfWorld {
return &actualStateOfWorld{
@@ -530,19 +558,22 @@ func (asw *actualStateOfWorld) DeleteVolumeNode(
asw.removeVolumeFromReportAsAttached(volumeName, nodeName)
}
func (asw *actualStateOfWorld) IsVolumeAttachedToNode(
volumeName v1.UniqueVolumeName, nodeName types.NodeName) bool {
func (asw *actualStateOfWorld) GetAttachState(
volumeName v1.UniqueVolumeName, nodeName types.NodeName) AttachState {
asw.RLock()
defer asw.RUnlock()
volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if volumeExists {
if node, nodeExists := volumeObj.nodesAttachedTo[nodeName]; nodeExists {
return node.attachedConfirmed
if node.attachedConfirmed {
return AttachStateAttached
}
return AttachStateUncertain
}
}
return false
return AttachStateDetached
}
func (asw *actualStateOfWorld) GetAttachedVolumes() []AttachedVolume {


@@ -142,6 +142,7 @@ func (rc *reconciler) reconcile() {
for _, attachedVolume := range rc.actualStateOfWorld.GetAttachedVolumes() {
if !rc.desiredStateOfWorld.VolumeExists(
attachedVolume.VolumeName, attachedVolume.NodeName) {
// Check whether there is already an operation pending, and don't even
// try to start an operation if there is already one running.
// This check must be done before we do any other checks, as otherwise the other checks
@@ -161,6 +162,21 @@
}
}
// Because the detach operation updates the ActualStateOfWorld before
// marking itself complete, it's possible for the volume to be removed
// from the ActualStateOfWorld between the GetAttachedVolumes() check
// and the IsOperationPending() check above.
// Check the ActualStateOfWorld again to avoid issuing an unnecessary
// detach.
// See https://github.com/kubernetes/kubernetes/issues/93902
attachState := rc.actualStateOfWorld.GetAttachState(attachedVolume.VolumeName, attachedVolume.NodeName)
if attachState == cache.AttachStateDetached {
if klog.V(5).Enabled() {
klog.Infof(attachedVolume.GenerateMsgDetailed("Volume detached--skipping", ""))
}
continue
}
// Set the detach request time
elapsedTime, err := rc.actualStateOfWorld.SetDetachRequestTime(attachedVolume.VolumeName, attachedVolume.NodeName)
if err != nil {
@@ -226,7 +242,31 @@
func (rc *reconciler) attachDesiredVolumes() {
// Ensure volumes that should be attached are attached.
for _, volumeToAttach := range rc.desiredStateOfWorld.GetVolumesToAttach() {
if rc.actualStateOfWorld.IsVolumeAttachedToNode(volumeToAttach.VolumeName, volumeToAttach.NodeName) {
if util.IsMultiAttachAllowed(volumeToAttach.VolumeSpec) {
// Don't even try to start an operation if there is already one running for the given volume and node.
if rc.attacherDetacher.IsOperationPending(volumeToAttach.VolumeName, "" /* podName */, volumeToAttach.NodeName) {
if klog.V(10).Enabled() {
klog.Infof("Operation for volume %q is already running for node %q. Can't start attach", volumeToAttach.VolumeName, volumeToAttach.NodeName)
}
continue
}
} else {
// Don't even try to start an operation if there is already one running for the given volume
if rc.attacherDetacher.IsOperationPending(volumeToAttach.VolumeName, "" /* podName */, "" /* nodeName */) {
if klog.V(10).Enabled() {
klog.Infof("Operation for volume %q is already running. Can't start attach for %q", volumeToAttach.VolumeName, volumeToAttach.NodeName)
}
continue
}
}
// Because the attach operation updates the ActualStateOfWorld before
// marking itself complete, IsOperationPending() must be checked before
// GetAttachState() to guarantee the ActualStateOfWorld is
// up-to-date when it's read.
// See https://github.com/kubernetes/kubernetes/issues/93902
attachState := rc.actualStateOfWorld.GetAttachState(volumeToAttach.VolumeName, volumeToAttach.NodeName)
if attachState == cache.AttachStateAttached {
// Volume/Node exists, touch it to reset detachRequestedTime
if klog.V(5).Enabled() {
klog.Infof(volumeToAttach.GenerateMsgDetailed("Volume attached--touching", ""))
@@ -235,26 +275,7 @@
continue
}
if util.IsMultiAttachAllowed(volumeToAttach.VolumeSpec) {
// Don't even try to start an operation if there is already one running for the given volume and node.
if rc.attacherDetacher.IsOperationPending(volumeToAttach.VolumeName, "" /* podName */, volumeToAttach.NodeName) {
if klog.V(10).Enabled() {
klog.Infof("Operation for volume %q is already running for node %q. Can't start attach", volumeToAttach.VolumeName, volumeToAttach.NodeName)
}
continue
}
} else {
// Don't even try to start an operation if there is already one running for the given volume
if rc.attacherDetacher.IsOperationPending(volumeToAttach.VolumeName, "" /* podName */, "" /* nodeName */) {
if klog.V(10).Enabled() {
klog.Infof("Operation for volume %q is already running. Can't start attach for %q", volumeToAttach.VolumeName, volumeToAttach.NodeName)
}
continue
}
if !util.IsMultiAttachAllowed(volumeToAttach.VolumeSpec) {
nodes := rc.actualStateOfWorld.GetNodesForAttachedVolume(volumeToAttach.VolumeName)
if len(nodes) > 0 {
if !volumeToAttach.MultiAttachErrorReported {
@@ -263,7 +284,6 @@
}
continue
}
}
// Volume/Node doesn't exist, spawn a goroutine to attach it


@@ -25,7 +25,6 @@ go_library(
"kubelet_resources.go",
"kubelet_volumes.go",
"pod_container_deletor.go",
"pod_sandbox_deleter.go",
"pod_workers.go",
"reason_cache.go",
"runonce.go",
@@ -186,7 +185,6 @@ go_test(
"kubelet_volumes_linux_test.go",
"kubelet_volumes_test.go",
"pod_container_deletor_test.go",
"pod_sandbox_deleter_test.go",
"pod_workers_test.go",
"reason_cache_test.go",
"runonce_test.go",
@@ -264,14 +262,21 @@ go_test(
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
"//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
"//staging/src/k8s.io/component-base/version:go_default_library",
"//staging/src/k8s.io/cri-api/pkg/apis/runtime/v1alpha2:go_default_library",
"//staging/src/k8s.io/mount-utils:go_default_library",
"//vendor/github.com/golang/groupcache/lru:go_default_library",
"//vendor/github.com/google/cadvisor/info/v1:go_default_library",
"//vendor/github.com/google/cadvisor/info/v2:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/github.com/stretchr/testify/require:go_default_library",
],
] + select({
"@io_bazel_rules_go//go/platform:android": [
"//staging/src/k8s.io/cri-api/pkg/apis/runtime/v1alpha2:go_default_library",
],
"@io_bazel_rules_go//go/platform:linux": [
"//staging/src/k8s.io/cri-api/pkg/apis/runtime/v1alpha2:go_default_library",
],
"//conditions:default": [],
}),
)
filegroup(


@@ -68,10 +68,26 @@ func (s *scope) Name() string {
return s.name
}
func (s *scope) GetAffinity(podUID string, containerName string) TopologyHint {
func (s *scope) getTopologyHints(podUID string, containerName string) TopologyHint {
s.mutex.Lock()
defer s.mutex.Unlock()
return s.podTopologyHints[podUID][containerName]
}
func (s *scope) setTopologyHints(podUID string, containerName string, th TopologyHint) {
s.mutex.Lock()
defer s.mutex.Unlock()
if s.podTopologyHints[podUID] == nil {
s.podTopologyHints[podUID] = make(map[string]TopologyHint)
}
s.podTopologyHints[podUID][containerName] = th
}
func (s *scope) GetAffinity(podUID string, containerName string) TopologyHint {
return s.getTopologyHints(podUID, containerName)
}
func (s *scope) AddHintProvider(h HintProvider) {
s.hintProviders = append(s.hintProviders, h)
}
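The refactor routes every read and write of the nested podTopologyHints map through mutex-guarded accessors, which lets the container- and pod-scope Admit methods in the next two hunks drop their inline map-initialization code. A reduced sketch of the same pattern (hintStore is an illustrative cut-down of scope):

package main

import (
    "fmt"
    "sync"
)

type hintStore struct {
    mu    sync.Mutex
    hints map[string]map[string]string // podUID -> container name -> hint
}

func (s *hintStore) set(podUID, container, hint string) {
    s.mu.Lock()
    defer s.mu.Unlock()
    if s.hints[podUID] == nil {
        s.hints[podUID] = make(map[string]string) // lazily allocate the inner map
    }
    s.hints[podUID][container] = hint
}

func (s *hintStore) get(podUID, container string) string {
    s.mu.Lock()
    defer s.mu.Unlock()
    return s.hints[podUID][container] // zero value if either key is absent
}

func main() {
    s := &hintStore{hints: make(map[string]map[string]string)}
    s.set("pod-1", "app", "numa-0")
    fmt.Println(s.get("pod-1", "app")) // numa-0
}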


@@ -55,13 +55,9 @@ func (s *containerScope) Admit(pod *v1.Pod) lifecycle.PodAdmitResult {
if !admit {
return topologyAffinityError()
}
if (s.podTopologyHints)[string(pod.UID)] == nil {
(s.podTopologyHints)[string(pod.UID)] = make(map[string]TopologyHint)
}
klog.Infof("[topologymanager] Topology Affinity for (pod: %v container: %v): %v", format.Pod(pod), container.Name, bestHint)
(s.podTopologyHints)[string(pod.UID)][container.Name] = bestHint
s.setTopologyHints(string(pod.UID), container.Name, bestHint)
err := s.allocateAlignedResources(pod, &container)
if err != nil {
return unexpectedAdmissionError(err)


@@ -56,12 +56,7 @@ func (s *podScope) Admit(pod *v1.Pod) lifecycle.PodAdmitResult {
for _, container := range append(pod.Spec.InitContainers, pod.Spec.Containers...) {
klog.Infof("[topologymanager] Topology Affinity for (pod: %v container: %v): %v", format.Pod(pod), container.Name, bestHint)
if (s.podTopologyHints)[string(pod.UID)] == nil {
(s.podTopologyHints)[string(pod.UID)] = make(map[string]TopologyHint)
}
(s.podTopologyHints)[string(pod.UID)][container.Name] = bestHint
s.setTopologyHints(string(pod.UID), container.Name, bestHint)
err := s.allocateAlignedResources(pod, &container)
if err != nil {


@@ -114,8 +114,6 @@ type Runtime interface {
GetContainerLogs(ctx context.Context, pod *v1.Pod, containerID ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error)
// Delete a container. If the container is still running, an error is returned.
DeleteContainer(containerID ContainerID) error
// DeleteSandbox deletes a sandbox.
DeleteSandbox(sandboxID string) error
// ImageService provides access to image-related methods.
ImageService
// UpdatePodCIDR sends a new podCIDR to the runtime.
@@ -301,6 +299,7 @@ type PodStatus struct {
// Status of containers in the pod.
ContainerStatuses []*Status
// Status of the pod sandbox.
// Only for kuberuntime now; other runtimes may keep it nil.
SandboxStatuses []*runtimeapi.PodSandboxStatus
}
@@ -310,8 +309,6 @@
ID ContainerID
// Name of the container.
Name string
// ID of the sandbox to which this container belongs.
PodSandboxID string
// Status of the container.
State State
// Creation time of the container.


@@ -24,6 +24,7 @@ import (
"net/http"
"path"
"path/filepath"
"runtime"
"sync"
"time"
@@ -254,24 +255,28 @@ func NewDockerService(config *ClientConfig, podSandboxImage string, streamingCon
ds.network = network.NewPluginManager(plug)
klog.Infof("Docker cri networking managed by %v", plug.Name())
// NOTE: cgroup driver is only detectable in docker 1.11+
cgroupDriver := defaultCgroupDriver
dockerInfo, err := ds.client.Info()
klog.Infof("Docker Info: %+v", dockerInfo)
if err != nil {
klog.Errorf("Failed to execute Info() call to the Docker client: %v", err)
klog.Warningf("Falling back to use the default driver: %q", cgroupDriver)
} else if len(dockerInfo.CgroupDriver) == 0 {
klog.Warningf("No cgroup driver is set in Docker")
klog.Warningf("Falling back to use the default driver: %q", cgroupDriver)
} else {
cgroupDriver = dockerInfo.CgroupDriver
// skipping cgroup driver checks for Windows
if runtime.GOOS == "linux" {
// NOTE: cgroup driver is only detectable in docker 1.11+
cgroupDriver := defaultCgroupDriver
dockerInfo, err := ds.client.Info()
klog.Infof("Docker Info: %+v", dockerInfo)
if err != nil {
klog.Errorf("Failed to execute Info() call to the Docker client: %v", err)
klog.Warningf("Falling back to use the default driver: %q", cgroupDriver)
} else if len(dockerInfo.CgroupDriver) == 0 {
klog.Warningf("No cgroup driver is set in Docker")
klog.Warningf("Falling back to use the default driver: %q", cgroupDriver)
} else {
cgroupDriver = dockerInfo.CgroupDriver
}
if len(kubeCgroupDriver) != 0 && kubeCgroupDriver != cgroupDriver {
return nil, fmt.Errorf("misconfiguration: kubelet cgroup driver: %q is different from docker cgroup driver: %q", kubeCgroupDriver, cgroupDriver)
}
klog.Infof("Setting cgroupDriver to %s", cgroupDriver)
ds.cgroupDriver = cgroupDriver
}
if len(kubeCgroupDriver) != 0 && kubeCgroupDriver != cgroupDriver {
return nil, fmt.Errorf("misconfiguration: kubelet cgroup driver: %q is different from docker cgroup driver: %q", kubeCgroupDriver, cgroupDriver)
}
klog.Infof("Setting cgroupDriver to %s", cgroupDriver)
ds.cgroupDriver = cgroupDriver
ds.versionCache = cache.NewObjectCache(
func() (interface{}, error) {
return ds.getDockerVersion()
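
A minimal runnable sketch of the Linux-only detection flow in this hunk; defaultCgroupDriver and dockerInfo are assumed stand-ins for the real constant and the ds.client.Info() call, not the actual dockershim API.

package main

import (
	"fmt"
	"runtime"
)

const defaultCgroupDriver = "cgroupfs" // assumed default, as a stand-in

type dockerSysInfo struct{ CgroupDriver string }

// dockerInfo is a hypothetical stub for ds.client.Info().
func dockerInfo() (dockerSysInfo, error) { return dockerSysInfo{CgroupDriver: "systemd"}, nil }

func detectCgroupDriver(kubeCgroupDriver string) (string, error) {
	// cgroup driver checks are skipped on Windows, as in the change above
	if runtime.GOOS != "linux" {
		return "", nil
	}
	driver := defaultCgroupDriver
	info, err := dockerInfo()
	if err == nil && info.CgroupDriver != "" {
		driver = info.CgroupDriver
	}
	// a kubelet/docker mismatch is a hard misconfiguration error
	if kubeCgroupDriver != "" && kubeCgroupDriver != driver {
		return "", fmt.Errorf("misconfiguration: kubelet cgroup driver %q differs from docker cgroup driver %q", kubeCgroupDriver, driver)
	}
	return driver, nil
}

func main() {
	fmt.Println(detectCgroupDriver("systemd"))
}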


@ -148,14 +148,14 @@ func (f *fakeIPTables) ensureRule(position utiliptables.RulePosition, tableName
return true, nil
}
if position == utiliptables.Prepend {
switch position {
case utiliptables.Prepend:
chain.rules = append([]string{rule}, chain.rules...)
} else if position == utiliptables.Append {
case utiliptables.Append:
chain.rules = append(chain.rules, rule)
} else {
default:
return false, fmt.Errorf("unknown position argument %q", position)
}
return false, nil
}
@ -185,7 +185,7 @@ func normalizeRule(rule string) (string, error) {
// Normalize un-prefixed IP addresses like iptables does
if net.ParseIP(arg) != nil {
arg = arg + "/32"
arg += "/32"
}
if len(normalized) > 0 {
@ -281,7 +281,10 @@ func (f *fakeIPTables) restore(restoreTableName utiliptables.Table, data []byte,
if strings.HasPrefix(line, ":") {
chainName := utiliptables.Chain(strings.Split(line[1:], " ")[0])
if flush == utiliptables.FlushTables {
table, chain, _ := f.getChain(tableName, chainName)
table, chain, err := f.getChain(tableName, chainName)
if err != nil {
return err
}
if chain != nil {
delete(table.chains, string(chainName))
}
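
The normalizeRule hunk above appends an implicit prefix to bare IPs before comparison, since iptables prints them that way. A small sketch of just that step (normalizeArg is an illustrative name, not the real helper):

package main

import (
	"fmt"
	"net"
)

// normalizeArg appends "/32" to a bare IP, mirroring the `arg += "/32"` change.
func normalizeArg(arg string) string {
	if net.ParseIP(arg) != nil {
		arg += "/32"
	}
	return arg
}

func main() {
	fmt.Println(normalizeArg("10.0.0.1"))    // 10.0.0.1/32
	fmt.Println(normalizeArg("10.0.0.0/24")) // already prefixed; unchanged
}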


@ -21,6 +21,7 @@ package hostport
import (
"fmt"
"net"
"strconv"
"strings"
"k8s.io/klog/v2"
@ -53,7 +54,18 @@ type PodPortMapping struct {
IP net.IP
}
// ipFamily refers to a specific family if not empty, i.e. "4" or "6".
type ipFamily string
// Constants for valid IPFamily:
const (
IPv4 ipFamily = "4"
IPv6 ipFamily = "6"
)
type hostport struct {
ipFamily ipFamily
ip string
port int32
protocol string
}
@ -78,19 +90,23 @@ func openLocalPort(hp *hostport) (closeable, error) {
// bind()ed but not listen()ed, and at least the default debian netcat
// has no way to avoid about 10 seconds of retries.
var socket closeable
// open the socket on the HostIP and HostPort specified
address := net.JoinHostPort(hp.ip, strconv.Itoa(int(hp.port)))
switch hp.protocol {
case "tcp":
listener, err := net.Listen("tcp", fmt.Sprintf(":%d", hp.port))
network := "tcp" + string(hp.ipFamily)
listener, err := net.Listen(network, address)
if err != nil {
return nil, err
}
socket = listener
case "udp":
addr, err := net.ResolveUDPAddr("udp", fmt.Sprintf(":%d", hp.port))
network := "udp" + string(hp.ipFamily)
addr, err := net.ResolveUDPAddr(network, address)
if err != nil {
return nil, err
}
conn, err := net.ListenUDP("udp", addr)
conn, err := net.ListenUDP(network, addr)
if err != nil {
return nil, err
}
@ -103,8 +119,10 @@ func openLocalPort(hp *hostport) (closeable, error) {
}
// portMappingToHostport creates a hostport structure based on the input portmapping
func portMappingToHostport(portMapping *PortMapping) hostport {
func portMappingToHostport(portMapping *PortMapping, family ipFamily) hostport {
return hostport{
ipFamily: family,
ip: portMapping.HostIP,
port: portMapping.HostPort,
protocol: strings.ToLower(string(portMapping.Protocol)),
}
@ -124,9 +142,11 @@ func ensureKubeHostportChains(iptables utiliptables.Interface, natInterfaceName
{utiliptables.TableNAT, utiliptables.ChainOutput},
{utiliptables.TableNAT, utiliptables.ChainPrerouting},
}
args := []string{"-m", "comment", "--comment", "kube hostport portals",
args := []string{
"-m", "comment", "--comment", "kube hostport portals",
"-m", "addrtype", "--dst-type", "LOCAL",
"-j", string(kubeHostportsChain)}
"-j", string(kubeHostportsChain),
}
for _, tc := range tableChainsNeedJumpServices {
// KUBE-HOSTPORTS chain needs to be appended to the system chains.
// This ensures KUBE-SERVICES chain gets processed first.
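
The openLocalPort change above pins the socket to one IP family by joining host and port and suffixing the network string with "4" or "6". A hedged sketch of that bind, assuming TCP only (listenHostport is an illustrative name):

package main

import (
	"fmt"
	"net"
	"strconv"
)

// listenHostport binds on a specific address and IP family, as the hunk does
// with "tcp"+string(hp.ipFamily) and net.JoinHostPort.
func listenHostport(family, ip string, port int32) (net.Listener, error) {
	address := net.JoinHostPort(ip, strconv.Itoa(int(port)))
	return net.Listen("tcp"+family, address)
}

func main() {
	l, err := listenHostport("4", "127.0.0.1", 0) // port 0: pick any free port
	if err != nil {
		fmt.Println(err)
		return
	}
	defer l.Close()
	fmt.Println("listening on", l.Addr())
}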


@ -59,6 +59,7 @@ type hostportManager struct {
mu sync.Mutex
}
// NewHostportManager creates a new HostPortManager
func NewHostportManager(iptables utiliptables.Interface) HostPortManager {
h := &hostportManager{
hostPortMap: make(map[hostport]closeable),
@ -78,13 +79,6 @@ func (hm *hostportManager) Add(id string, podPortMapping *PodPortMapping, natInt
return nil
}
podFullName := getPodFullName(podPortMapping)
// skip if there is no hostport needed
hostportMappings := gatherHostportMappings(podPortMapping)
if len(hostportMappings) == 0 {
return nil
}
// IP.To16() returns nil if IP is not a valid IPv4 or IPv6 address
if podPortMapping.IP.To16() == nil {
return fmt.Errorf("invalid or missing IP of pod %s", podFullName)
@ -92,11 +86,17 @@ func (hm *hostportManager) Add(id string, podPortMapping *PodPortMapping, natInt
podIP := podPortMapping.IP.String()
isIPv6 := utilnet.IsIPv6(podPortMapping.IP)
// skip if there is no hostport needed
hostportMappings := gatherHostportMappings(podPortMapping, isIPv6)
if len(hostportMappings) == 0 {
return nil
}
if isIPv6 != hm.iptables.IsIPv6() {
return fmt.Errorf("HostPortManager IP family mismatch: %v, isIPv6 - %v", podIP, isIPv6)
}
if err = ensureKubeHostportChains(hm.iptables, natInterfaceName); err != nil {
if err := ensureKubeHostportChains(hm.iptables, natInterfaceName); err != nil {
return err
}
@ -152,10 +152,17 @@ func (hm *hostportManager) Add(id string, podPortMapping *PodPortMapping, natInt
// DNAT to the podIP:containerPort
hostPortBinding := net.JoinHostPort(podIP, strconv.Itoa(int(pm.ContainerPort)))
writeLine(natRules, "-A", string(chain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s hostport %d"`, podFullName, pm.HostPort),
"-m", protocol, "-p", protocol,
"-j", "DNAT", fmt.Sprintf("--to-destination=%s", hostPortBinding))
if pm.HostIP == "" || pm.HostIP == "0.0.0.0" || pm.HostIP == "::" {
writeLine(natRules, "-A", string(chain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s hostport %d"`, podFullName, pm.HostPort),
"-m", protocol, "-p", protocol,
"-j", "DNAT", fmt.Sprintf("--to-destination=%s", hostPortBinding))
} else {
writeLine(natRules, "-A", string(chain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s hostport %d"`, podFullName, pm.HostPort),
"-m", protocol, "-p", protocol, "-d", pm.HostIP,
"-j", "DNAT", fmt.Sprintf("--to-destination=%s", hostPortBinding))
}
}
// getHostportChain should be able to provide unique hostport chain name using hash
@ -198,8 +205,8 @@ func (hm *hostportManager) Remove(id string, podPortMapping *PodPortMapping) (er
return nil
}
hostportMappings := gatherHostportMappings(podPortMapping)
if len(hostportMappings) <= 0 {
hostportMappings := gatherHostportMappings(podPortMapping, hm.iptables.IsIPv6())
if len(hostportMappings) == 0 {
return nil
}
@ -231,6 +238,12 @@ func (hm *hostportManager) Remove(id string, podPortMapping *PodPortMapping) (er
}
}
// exit if there is nothing to remove
// don't forget to clean up opened pod host ports
if len(existingChainsToRemove) == 0 {
return hm.closeHostports(hostportMappings)
}
natChains := bytes.NewBuffer(nil)
natRules := bytes.NewBuffer(nil)
writeLine(natChains, "*nat")
@ -245,7 +258,7 @@ func (hm *hostportManager) Remove(id string, podPortMapping *PodPortMapping) (er
}
writeLine(natRules, "COMMIT")
if err = hm.syncIPTables(append(natChains.Bytes(), natRules.Bytes()...)); err != nil {
if err := hm.syncIPTables(append(natChains.Bytes(), natRules.Bytes()...)); err != nil {
return err
}
@ -279,7 +292,12 @@ func (hm *hostportManager) openHostports(podPortMapping *PodPortMapping) (map[ho
continue
}
hp := portMappingToHostport(pm)
// HostIP IP family is not handled by this port opener
if pm.HostIP != "" && utilnet.IsIPv6String(pm.HostIP) != hm.iptables.IsIPv6() {
continue
}
hp := portMappingToHostport(pm, hm.getIPFamily())
socket, err := hm.portOpener(&hp)
if err != nil {
retErr = fmt.Errorf("cannot open hostport %d for pod %s: %v", pm.HostPort, getPodFullName(podPortMapping), err)
@ -304,7 +322,7 @@ func (hm *hostportManager) openHostports(podPortMapping *PodPortMapping) (map[ho
func (hm *hostportManager) closeHostports(hostportMappings []*PortMapping) error {
errList := []error{}
for _, pm := range hostportMappings {
hp := portMappingToHostport(pm)
hp := portMappingToHostport(pm, hm.getIPFamily())
if socket, ok := hm.hostPortMap[hp]; ok {
klog.V(2).Infof("Closing host port %s", hp.String())
if err := socket.Close(); err != nil {
@ -312,11 +330,22 @@ func (hm *hostportManager) closeHostports(hostportMappings []*PortMapping) error
continue
}
delete(hm.hostPortMap, hp)
} else {
klog.V(5).Infof("host port %s does not have an open socket", hp.String())
}
}
return utilerrors.NewAggregate(errList)
}
// getIPFamily returns the hostPortManager IP family
func (hm *hostportManager) getIPFamily() ipFamily {
family := IPv4
if hm.iptables.IsIPv6() {
family = IPv6
}
return family
}
// getHostportChain takes id, hostport and protocol for a pod and returns associated iptables chain.
// This is computed by hashing (sha256) then encoding to base32 and truncating with the prefix
// "KUBE-HP-". We do this because IPTables Chain Names must be <= 28 chars long, and the longer
@ -324,18 +353,22 @@ func (hm *hostportManager) closeHostports(hostportMappings []*PortMapping) error
// WARNING: Please do not change this function. Otherwise, HostportManager may not be able to
// identify existing iptables chains.
func getHostportChain(id string, pm *PortMapping) utiliptables.Chain {
hash := sha256.Sum256([]byte(id + strconv.Itoa(int(pm.HostPort)) + string(pm.Protocol)))
hash := sha256.Sum256([]byte(id + strconv.Itoa(int(pm.HostPort)) + string(pm.Protocol) + pm.HostIP))
encoded := base32.StdEncoding.EncodeToString(hash[:])
return utiliptables.Chain(kubeHostportChainPrefix + encoded[:16])
}
// gatherHostportMappings returns all the PortMappings which have a hostport for a pod
func gatherHostportMappings(podPortMapping *PodPortMapping) []*PortMapping {
// it filters out the PortMappings whose HostIP doesn't match the specified IP family
func gatherHostportMappings(podPortMapping *PodPortMapping, isIPv6 bool) []*PortMapping {
mappings := []*PortMapping{}
for _, pm := range podPortMapping.PortMappings {
if pm.HostPort <= 0 {
continue
}
if pm.HostIP != "" && utilnet.IsIPv6String(pm.HostIP) != isIPv6 {
continue
}
mappings = append(mappings, pm)
}
return mappings
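
The getHostportChain change above adds pm.HostIP to the hashed input so that per-IP mappings land in distinct chains. A self-contained sketch of that derivation (constant and truncation length copied from the hunk; hostportChain is an illustrative name):

package main

import (
	"crypto/sha256"
	"encoding/base32"
	"fmt"
	"strconv"
)

// hostportChain hashes id+port+protocol+hostIP, base32-encodes, and truncates
// so the result fits iptables' 28-character chain name limit.
func hostportChain(id string, port int, protocol, hostIP string) string {
	hash := sha256.Sum256([]byte(id + strconv.Itoa(port) + protocol + hostIP))
	encoded := base32.StdEncoding.EncodeToString(hash[:])
	return "KUBE-HP-" + encoded[:16]
}

func main() {
	fmt.Println(hostportChain("pod-uid", 8080, "tcp", ""))
	fmt.Println(hostportChain("pod-uid", 8080, "tcp", "10.0.0.2")) // differs per HostIP
}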


@ -124,6 +124,9 @@ const (
// Max amount of time to wait for the container runtime to come up.
maxWaitForContainerRuntime = 30 * time.Second
// Max amount of time to wait for node list/watch to initially sync
maxWaitForAPIServerSync = 10 * time.Second
// nodeStatusUpdateRetry specifies how many times kubelet retries when posting node status failed.
nodeStatusUpdateRetry = 5
@ -431,14 +434,30 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
serviceHasSynced = func() bool { return true }
}
nodeIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
var nodeHasSynced cache.InformerSynced
var nodeLister corelisters.NodeLister
if kubeDeps.KubeClient != nil {
fieldSelector := fields.Set{api.ObjectNameField: string(nodeName)}.AsSelector()
nodeLW := cache.NewListWatchFromClient(kubeDeps.KubeClient.CoreV1().RESTClient(), "nodes", metav1.NamespaceAll, fieldSelector)
r := cache.NewReflector(nodeLW, &v1.Node{}, nodeIndexer, 0)
go r.Run(wait.NeverStop)
kubeInformers := informers.NewSharedInformerFactoryWithOptions(kubeDeps.KubeClient, 0, informers.WithTweakListOptions(func(options *metav1.ListOptions) {
options.FieldSelector = fields.Set{api.ObjectNameField: string(nodeName)}.String()
}))
nodeLister = kubeInformers.Core().V1().Nodes().Lister()
nodeHasSynced = func() bool {
if kubeInformers.Core().V1().Nodes().Informer().HasSynced() {
return true
}
klog.Infof("kubelet nodes not sync")
return false
}
kubeInformers.Start(wait.NeverStop)
klog.Info("Kubelet client is not nil")
} else {
// we don't have a client to sync!
nodeIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
nodeLister = corelisters.NewNodeLister(nodeIndexer)
nodeHasSynced = func() bool { return true }
klog.Info("Kubelet client is nil")
}
nodeLister := corelisters.NewNodeLister(nodeIndexer)
// construct a node reference used for events
nodeRef := &v1.ObjectReference{
@ -481,6 +500,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
serviceLister: serviceLister,
serviceHasSynced: serviceHasSynced,
nodeLister: nodeLister,
nodeHasSynced: nodeHasSynced,
masterServiceNamespace: masterServiceNamespace,
streamingConnectionIdleTimeout: kubeCfg.StreamingConnectionIdleTimeout.Duration,
recorder: kubeDeps.Recorder,
@ -665,7 +685,6 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
}
klet.containerGC = containerGC
klet.containerDeletor = newPodContainerDeletor(klet.containerRuntime, integer.IntMax(containerGCPolicy.MaxPerPodContainer, minDeadContainerInPod))
klet.sandboxDeleter = newPodSandboxDeleter(klet.containerRuntime)
// setup imageManager
imageManager, err := images.NewImageGCManager(klet.containerRuntime, klet.StatsProvider, kubeDeps.Recorder, nodeRef, imageGCPolicy, crOptions.PodSandboxImage)
@ -882,7 +901,9 @@ type Kubelet struct {
serviceHasSynced cache.InformerSynced
// nodeLister knows how to list nodes
nodeLister corelisters.NodeLister
// nodeHasSynced indicates whether nodes have been sync'd at least once.
// Check this before trusting a response from the node lister.
nodeHasSynced cache.InformerSynced
// a list of node labels to register
nodeLabels map[string]string
@ -1101,9 +1122,6 @@ type Kubelet struct {
// trigger deleting containers in a pod
containerDeletor *podContainerDeletor
// trigger deleting sandboxes in a pod
sandboxDeleter *podSandboxDeleter
// config iptables util rules
makeIPTablesUtilChains bool
@ -1477,6 +1495,13 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
return nil
}
// If the pod is a static pod and its mirror pod is still gracefully terminating,
// we do not want to start the new static pod until the old static pod is gracefully terminated.
podFullName := kubecontainer.GetPodFullName(pod)
if kl.podKiller.IsMirrorPodPendingTerminationByPodName(podFullName) {
return fmt.Errorf("pod %q is pending termination", podFullName)
}
// Latency measurements for the main workflow are relative to the
// first time the pod was seen by the API server.
var firstSeenTime time.Time
@ -1612,7 +1637,6 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
// Create Mirror Pod for Static Pod if it doesn't already exist
if kubetypes.IsStaticPod(pod) {
podFullName := kubecontainer.GetPodFullName(pod)
deleted := false
if mirrorPod != nil {
if mirrorPod.DeletionTimestamp != nil || !kl.podManager.IsMirrorPodOf(mirrorPod, pod) {
@ -1743,6 +1767,9 @@ func (kl *Kubelet) deletePod(pod *v1.Pod) error {
}
podPair := kubecontainer.PodPair{APIPod: pod, RunningPod: &runningPod}
if _, ok := kl.podManager.GetMirrorPodByPod(pod); ok {
kl.podKiller.MarkMirrorPodPendingTermination(pod)
}
kl.podKiller.KillPod(&podPair)
// We leave the volume/directory cleanup to the periodic cleanup routine.
@ -1930,9 +1957,6 @@ func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handle
klog.V(4).Infof("SyncLoop (PLEG): ignore irrelevant event: %#v", e)
}
}
if e.Type == pleg.ContainerRemoved {
kl.deletePodSandbox(e.ID)
}
if e.Type == pleg.ContainerDied {
if containerID, ok := e.Data.(string); ok {
@ -2084,9 +2108,6 @@ func (kl *Kubelet) HandlePodRemoves(pods []*v1.Pod) {
kl.handleMirrorPod(pod, start)
continue
}
if _, ok := kl.podManager.GetMirrorPodByPod(pod); ok {
kl.podKiller.MarkMirrorPodPendingTermination(pod)
}
// Deletion is allowed to fail because the periodic cleanup routine
// will trigger deletion again.
if err := kl.deletePod(pod); err != nil {
@ -2256,16 +2277,6 @@ func (kl *Kubelet) fastStatusUpdateOnce() {
}
}
func (kl *Kubelet) deletePodSandbox(podID types.UID) {
if podStatus, err := kl.podCache.Get(podID); err == nil {
toKeep := 1
if kl.IsPodDeleted(podID) {
toKeep = 0
}
kl.sandboxDeleter.deleteSandboxesInPod(podStatus, toKeep)
}
}
// isSyncPodWorthy filters out events that are not worthy of pod syncing
func isSyncPodWorthy(event *pleg.PodLifecycleEvent) bool {
// ContainerRemoved doesn't affect pod state


@ -22,6 +22,7 @@ import (
"io/ioutil"
"net"
"path/filepath"
"time"
cadvisorapiv1 "github.com/google/cadvisor/info/v1"
cadvisorv2 "github.com/google/cadvisor/info/v2"
@ -32,6 +33,7 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/pkg/kubelet/config"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
@ -235,6 +237,15 @@ func (kl *Kubelet) GetNode() (*v1.Node, error) {
if kl.kubeClient == nil {
return kl.initialNode(context.TODO())
}
// if we have a valid kube client, we wait for initial lister to sync
if !kl.nodeHasSynced() {
err := wait.PollImmediate(time.Second, maxWaitForAPIServerSync, func() (bool, error) {
return kl.nodeHasSynced(), nil
})
if err != nil {
return nil, fmt.Errorf("nodes have not yet been read at least once, cannot construct node object")
}
}
return kl.nodeLister.Get(string(kl.nodeName))
}
@ -245,7 +256,7 @@ func (kl *Kubelet) GetNode() (*v1.Node, error) {
// zero capacity, and the default labels.
func (kl *Kubelet) getNodeAnyWay() (*v1.Node, error) {
if kl.kubeClient != nil {
if n, err := kl.nodeLister.Get(string(kl.nodeName)); err == nil {
if n, err := kl.GetNode(); err == nil {
return n, nil
}
}
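
The GetNode change above blocks on the node informer for up to maxWaitForAPIServerSync before trusting the lister. A reduced sketch of that wait without the wait.PollImmediate dependency; hasSynced stands in for kl.nodeHasSynced:

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitForNodeSync polls hasSynced once per second until it returns true or
// the timeout elapses, mirroring the PollImmediate call in the hunk.
func waitForNodeSync(hasSynced func() bool, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for !hasSynced() {
		if time.Now().After(deadline) {
			return errors.New("nodes have not yet been read at least once")
		}
		time.Sleep(time.Second)
	}
	return nil
}

func main() {
	start := time.Now()
	synced := func() bool { return time.Since(start) > 2*time.Second }
	fmt.Println(waitForNodeSync(synced, 10*time.Second)) // <nil> once synced
}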


@ -127,7 +127,7 @@ func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool {
// reconcileHugePageResource will update huge page capacity for each page size and remove huge page sizes no longer supported
func (kl *Kubelet) reconcileHugePageResource(initialNode, existingNode *v1.Node) bool {
requiresUpdate := false
requiresUpdate := updateDefaultResources(initialNode, existingNode)
supportedHugePageResources := sets.String{}
for resourceName := range initialNode.Status.Capacity {
@ -174,7 +174,7 @@ func (kl *Kubelet) reconcileHugePageResource(initialNode, existingNode *v1.Node)
// Zeros out extended resource capacity during reconciliation.
func (kl *Kubelet) reconcileExtendedResource(initialNode, node *v1.Node) bool {
requiresUpdate := false
requiresUpdate := updateDefaultResources(initialNode, node)
// Check with the device manager to see if node has been recreated, in which case extended resources should be zeroed until they are available
if kl.containerManager.ShouldResetExtendedResourceCapacity() {
for k := range node.Status.Capacity {
@ -189,6 +189,29 @@ func (kl *Kubelet) reconcileExtendedResource(initialNode, node *v1.Node) bool {
return requiresUpdate
}
// updateDefaultResources will set the default resources on the existing node according to the initial node
func updateDefaultResources(initialNode, existingNode *v1.Node) bool {
requiresUpdate := false
if existingNode.Status.Capacity == nil {
if initialNode.Status.Capacity != nil {
existingNode.Status.Capacity = initialNode.Status.Capacity.DeepCopy()
requiresUpdate = true
} else {
existingNode.Status.Capacity = make(map[v1.ResourceName]resource.Quantity)
}
}
if existingNode.Status.Allocatable == nil {
if initialNode.Status.Allocatable != nil {
existingNode.Status.Allocatable = initialNode.Status.Allocatable.DeepCopy()
requiresUpdate = true
} else {
existingNode.Status.Allocatable = make(map[v1.ResourceName]resource.Quantity)
}
}
return requiresUpdate
}
// updateDefaultLabels will set the default labels on the node
func (kl *Kubelet) updateDefaultLabels(initialNode, existingNode *v1.Node) bool {
defaultLabels := []string{


@ -966,16 +966,6 @@ func (kl *Kubelet) PodResourcesAreReclaimed(pod *v1.Pod, status v1.PodStatus) bo
klog.V(3).Infof("Pod %q is terminated, but some containers have not been cleaned up: %s", format.Pod(pod), statusStr)
return false
}
// pod's sandboxes should be deleted
if len(runtimeStatus.SandboxStatuses) > 0 {
var sandboxStr string
for _, sandbox := range runtimeStatus.SandboxStatuses {
sandboxStr += fmt.Sprintf("%+v ", *sandbox)
}
klog.V(3).Infof("Pod %q is terminated, but some pod sandboxes have not been cleaned up: %s", format.Pod(pod), sandboxStr)
return false
}
if kl.podVolumesExist(pod.UID) && !kl.keepTerminatedPodVolumes {
// We shouldn't delete pods whose volumes have not been cleaned up if we are not keeping terminated pod volumes
klog.V(3).Infof("Pod %q is terminated, but some volumes have not been cleaned up", format.Pod(pod))


@ -474,7 +474,6 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, n
cStatus.Message += tMessage
}
}
cStatus.PodSandboxID = c.PodSandboxId
statuses[i] = cStatus
}


@ -161,13 +161,26 @@ func (cgc *containerGC) removeOldestNSandboxes(sandboxes []sandboxGCInfo, toRemo
// Remove from oldest to newest (last to first).
for i := len(sandboxes) - 1; i >= numToKeep; i-- {
if !sandboxes[i].active {
if err := cgc.manager.DeleteSandbox(sandboxes[i].id); err != nil {
klog.Errorf("Failed to remove sandbox %q: %v", sandboxes[i].id, err)
}
cgc.removeSandbox(sandboxes[i].id)
}
}
}
// removeSandbox removes the sandbox by sandboxID.
func (cgc *containerGC) removeSandbox(sandboxID string) {
klog.V(4).Infof("Removing sandbox %q", sandboxID)
// In normal cases, kubelet should've already called StopPodSandbox before
// GC kicks in. To guard against the rare cases where this is not true, try
// stopping the sandbox before removing it.
if err := cgc.client.StopPodSandbox(sandboxID); err != nil {
klog.Errorf("Failed to stop sandbox %q before removing: %v", sandboxID, err)
return
}
if err := cgc.client.RemovePodSandbox(sandboxID); err != nil {
klog.Errorf("Failed to remove sandbox %q: %v", sandboxID, err)
}
}
// evictableContainers gets all containers that are evictable. Evictable containers are: not running
// and created more than MinAge ago.
func (cgc *containerGC) evictableContainers(minAge time.Duration) (containersByEvictUnit, error) {


@ -313,15 +313,3 @@ func (m *kubeGenericRuntimeManager) GetPortForward(podName, podNamespace string,
}
return url.Parse(resp.Url)
}
// DeleteSandbox removes the sandbox by sandboxID.
func (m *kubeGenericRuntimeManager) DeleteSandbox(sandboxID string) error {
klog.V(4).Infof("Removing sandbox %q", sandboxID)
// stop sandbox is called as part of kill pod function but the error is ignored. So,
// we have to call stop sandbox again to make sure that all the resources like network
// are cleaned by runtime.
if err := m.runtimeService.StopPodSandbox(sandboxID); err != nil {
return err
}
return m.runtimeService.RemovePodSandbox(sandboxID)
}


@ -1,82 +0,0 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"sort"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
"k8s.io/klog/v2"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
const (
// The number of sandboxes which can be deleted in parallel.
sandboxDeletionBufferLimit = 20
)
type sandboxStatusByCreatedList []*runtimeapi.PodSandboxStatus
type podSandboxDeleter struct {
worker chan<- string
}
func (a sandboxStatusByCreatedList) Len() int { return len(a) }
func (a sandboxStatusByCreatedList) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a sandboxStatusByCreatedList) Less(i, j int) bool {
return a[i].CreatedAt > a[j].CreatedAt
}
func newPodSandboxDeleter(runtime kubecontainer.Runtime) *podSandboxDeleter {
buffer := make(chan string, sandboxDeletionBufferLimit)
go wait.Forever(func() {
for id := range buffer {
if err := runtime.DeleteSandbox(id); err != nil {
klog.Warningf("[pod_sandbox_deleter] DeleteSandbox returned error for (id=%v): %v", id, err)
}
}
}, 0)
return &podSandboxDeleter{
worker: buffer,
}
}
// deleteSandboxesInPod issues sandbox deletion requests for all inactive sandboxes after sorting by creation time
// and skipping toKeep number of sandboxes
func (p *podSandboxDeleter) deleteSandboxesInPod(podStatus *kubecontainer.PodStatus, toKeep int) {
sandboxIDs := sets.NewString()
for _, containerStatus := range podStatus.ContainerStatuses {
sandboxIDs.Insert(containerStatus.PodSandboxID)
}
sandboxStatuses := podStatus.SandboxStatuses
if toKeep > 0 {
sort.Sort(sandboxStatusByCreatedList(sandboxStatuses))
}
for i := len(sandboxStatuses) - 1; i >= toKeep; i-- {
if _, ok := sandboxIDs[sandboxStatuses[i].Id]; !ok && sandboxStatuses[i].State != runtimeapi.PodSandboxState_SANDBOX_READY {
select {
case p.worker <- sandboxStatuses[i].Id:
default:
klog.Warningf("Failed to issue the request to remove sandbox %v", sandboxStatuses[i].Id)
}
}
}
}


@ -358,7 +358,13 @@ func NewFilteredDialContext(wrapped DialContext, resolv Resolver, opts *Filtered
return wrapped
}
return func(ctx context.Context, network, address string) (net.Conn, error) {
resp, err := resolv.LookupIPAddr(ctx, address)
// DialContext is given host:port. LookupIPAddr expects host.
addressToResolve, _, err := net.SplitHostPort(address)
if err != nil {
addressToResolve = address
}
resp, err := resolv.LookupIPAddr(ctx, addressToResolve)
if err != nil {
return nil, err
}
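
The fix above in isolation: DialContext receives "host:port" but the resolver wants just the host, and the raw address is kept when there is no port to split off. hostToResolve is an illustrative name for the inline logic:

package main

import (
	"fmt"
	"net"
)

// hostToResolve strips the port if present; on a SplitHostPort error the
// address is assumed to already be a bare host.
func hostToResolve(address string) string {
	host, _, err := net.SplitHostPort(address)
	if err != nil {
		return address
	}
	return host
}

func main() {
	fmt.Println(hostToResolve("example.com:443")) // example.com
	fmt.Println(hostToResolve("example.com"))     // unchanged; SplitHostPort errors
}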


@ -11,6 +11,7 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/core/validation:go_default_library",
"//pkg/features:go_default_library",
"//pkg/scheduler/algorithmprovider:go_default_library",
"//pkg/scheduler/apis/config:go_default_library",


@ -35,6 +35,7 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/apis/core/validation"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/apis/config/scheme"
"k8s.io/kubernetes/pkg/scheduler/core"
@ -328,7 +329,8 @@ func (sched *Scheduler) recordSchedulingFailure(fwk framework.Framework, podInfo
}
pod := podInfo.Pod
fwk.EventRecorder().Eventf(pod, nil, v1.EventTypeWarning, "FailedScheduling", "Scheduling", err.Error())
msg := truncateMessage(err.Error())
fwk.EventRecorder().Eventf(pod, nil, v1.EventTypeWarning, "FailedScheduling", "Scheduling", msg)
if err := updatePod(sched.client, pod, &v1.PodCondition{
Type: v1.PodScheduled,
Status: v1.ConditionFalse,
@ -339,6 +341,16 @@ func (sched *Scheduler) recordSchedulingFailure(fwk framework.Framework, podInfo
}
}
// truncateMessage truncates a message if it hits the NoteLengthLimit.
func truncateMessage(message string) string {
max := validation.NoteLengthLimit
if len(message) <= max {
return message
}
suffix := " ..."
return message[:max-len(suffix)] + suffix
}
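
A quick usage sketch of the truncation added above, with an assumed limit in place of validation.NoteLengthLimit:

package main

import "fmt"

// truncate mirrors truncateMessage with max as a parameter; the real code
// reads the limit from validation.NoteLengthLimit.
func truncate(message string, max int) string {
	if len(message) <= max {
		return message
	}
	suffix := " ..."
	return message[:max-len(suffix)] + suffix
}

func main() {
	fmt.Println(truncate("short failure", 20))
	fmt.Println(truncate("a very long scheduling failure explanation", 20)) // "a very long sche ..."
}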
func updatePod(client clientset.Interface, pod *v1.Pod, condition *v1.PodCondition, nominatedNode string) error {
klog.V(3).Infof("Updating pod condition for %s/%s to (%s==%s, Reason=%s)", pod.Namespace, pod.Name, condition.Type, condition.Status, condition.Reason)
podCopy := pod.DeepCopy()


@ -29,6 +29,7 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",


@ -22,6 +22,7 @@ import (
"strings"
"sync"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog/v2"
"k8s.io/mount-utils"
"k8s.io/utils/exec"
@ -458,11 +459,12 @@ type VolumeHost interface {
// VolumePluginMgr tracks registered plugins.
type VolumePluginMgr struct {
mutex sync.Mutex
plugins map[string]VolumePlugin
prober DynamicPluginProber
probedPlugins map[string]VolumePlugin
Host VolumeHost
mutex sync.Mutex
plugins map[string]VolumePlugin
prober DynamicPluginProber
probedPlugins map[string]VolumePlugin
loggedDeprecationWarnings sets.String
Host VolumeHost
}
// Spec is an internal representation of a volume. All API volume types translate to Spec.
@ -593,6 +595,7 @@ func (pm *VolumePluginMgr) InitPlugins(plugins []VolumePlugin, prober DynamicPlu
defer pm.mutex.Unlock()
pm.Host = host
pm.loggedDeprecationWarnings = sets.NewString()
if prober == nil {
// Use a dummy prober to prevent nil deference.
@ -689,9 +692,7 @@ func (pm *VolumePluginMgr) FindPluginBySpec(spec *Spec) (VolumePlugin, error) {
}
// Issue warning if the matched provider is deprecated
if detail, ok := deprecatedVolumeProviders[matches[0].GetPluginName()]; ok {
klog.Warningf("WARNING: %s built-in volume provider is now deprecated. %s", matches[0].GetPluginName(), detail)
}
pm.logDeprecation(matches[0].GetPluginName())
return matches[0], nil
}
@ -724,12 +725,20 @@ func (pm *VolumePluginMgr) FindPluginByName(name string) (VolumePlugin, error) {
}
// Issue warning if the matched provider is deprecated
if detail, ok := deprecatedVolumeProviders[matches[0].GetPluginName()]; ok {
klog.Warningf("WARNING: %s built-in volume provider is now deprecated. %s", matches[0].GetPluginName(), detail)
}
pm.logDeprecation(matches[0].GetPluginName())
return matches[0], nil
}
// logDeprecation logs a warning when a deprecated plugin is used.
func (pm *VolumePluginMgr) logDeprecation(plugin string) {
if detail, ok := deprecatedVolumeProviders[plugin]; ok && !pm.loggedDeprecationWarnings.Has(plugin) {
klog.Warningf("WARNING: %s built-in volume provider is now deprecated. %s", plugin, detail)
// Make sure the message is logged only once. It has Warning severity
// and we don't want to spam the log too much.
pm.loggedDeprecationWarnings.Insert(plugin)
}
}
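
The log-once pattern above, extracted: remember what has already been warned about in a sets.String so repeated plugin lookups stay quiet. warner and warnOnce are illustrative names; the sets API is the one vendored in this repo:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

type warner struct{ seen sets.String }

// warnOnce emits the warning only on the first call per plugin.
func (w *warner) warnOnce(plugin, detail string) {
	if !w.seen.Has(plugin) {
		fmt.Printf("WARNING: %s is deprecated. %s\n", plugin, detail)
		w.seen.Insert(plugin)
	}
}

func main() {
	w := &warner{seen: sets.NewString()}
	w.warnOnce("azure-disk", "migrate to CSI")
	w.warnOnce("azure-disk", "migrate to CSI") // silent the second time
}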
// Check if probedPlugin cache update is required.
// If it is, initialize all probed plugins and replace the cache with them.
func (pm *VolumePluginMgr) refreshProbedPlugins() {


@ -183,11 +183,40 @@ func (az *Cloud) CreateOrUpdateSecurityGroup(sg network.SecurityGroup) error {
return rerr.Error()
}
func cleanupSubnetInFrontendIPConfigurations(lb *network.LoadBalancer) network.LoadBalancer {
if lb.LoadBalancerPropertiesFormat == nil || lb.FrontendIPConfigurations == nil {
return *lb
}
frontendIPConfigurations := *lb.FrontendIPConfigurations
for i := range frontendIPConfigurations {
config := frontendIPConfigurations[i]
if config.FrontendIPConfigurationPropertiesFormat != nil &&
config.Subnet != nil &&
config.Subnet.ID != nil {
subnet := network.Subnet{
ID: config.Subnet.ID,
}
if config.Subnet.Name != nil {
subnet.Name = config.FrontendIPConfigurationPropertiesFormat.Subnet.Name
}
config.FrontendIPConfigurationPropertiesFormat.Subnet = &subnet
frontendIPConfigurations[i] = config
continue
}
}
lb.FrontendIPConfigurations = &frontendIPConfigurations
return *lb
}
// CreateOrUpdateLB invokes az.LoadBalancerClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateLB(service *v1.Service, lb network.LoadBalancer) error {
ctx, cancel := getContextWithCancel()
defer cancel()
lb = cleanupSubnetInFrontendIPConfigurations(&lb)
rgName := az.getLoadBalancerResourceGroup()
rerr := az.LoadBalancerClient.CreateOrUpdate(ctx, rgName, to.String(lb.Name), lb, to.String(lb.Etag))
klog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%s): end", *lb.Name)


@ -1431,10 +1431,15 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromVMSS(service *v1.Service, backen
vmssNamesMap[vmSetName] = true
}
vmssUpdaters := make([]func() error, 0, len(vmssNamesMap))
errors := make([]error, 0, len(vmssNamesMap))
for vmssName := range vmssNamesMap {
vmssName := vmssName
vmss, err := ss.getVMSS(vmssName, azcache.CacheReadTypeDefault)
if err != nil {
return err
klog.Errorf("ensureBackendPoolDeletedFromVMSS: failed to get VMSS %s: %v", vmssName, err)
errors = append(errors, err)
continue
}
// When vmss is being deleted, CreateOrUpdate API would report "the vmss is being deleted" error.
@ -1450,11 +1455,15 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromVMSS(service *v1.Service, backen
vmssNIC := *vmss.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations
primaryNIC, err := ss.getPrimaryNetworkInterfaceConfigurationForScaleSet(vmssNIC, vmssName)
if err != nil {
return err
klog.Errorf("ensureBackendPoolDeletedFromVMSS: failed to get the primary network interface config of the VMSS %s: %v", vmssName, err)
errors = append(errors, err)
continue
}
primaryIPConfig, err := getPrimaryIPConfigFromVMSSNetworkConfig(primaryNIC)
if err != nil {
return err
klog.Errorf("ensureBackendPoolDeletedFromVMSS: failed to the primary IP config from the VMSS %s's network config : %v", vmssName, err)
errors = append(errors, err)
continue
}
loadBalancerBackendAddressPools := []compute.SubResource{}
if primaryIPConfig.LoadBalancerBackendAddressPools != nil {
@ -1475,26 +1484,39 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromVMSS(service *v1.Service, backen
continue
}
// Compose a new vmss with added backendPoolID.
primaryIPConfig.LoadBalancerBackendAddressPools = &newBackendPools
newVMSS := compute.VirtualMachineScaleSet{
Sku: vmss.Sku,
Location: vmss.Location,
VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{
VirtualMachineProfile: &compute.VirtualMachineScaleSetVMProfile{
NetworkProfile: &compute.VirtualMachineScaleSetNetworkProfile{
NetworkInterfaceConfigurations: &vmssNIC,
vmssUpdaters = append(vmssUpdaters, func() error {
// Compose a new vmss with added backendPoolID.
primaryIPConfig.LoadBalancerBackendAddressPools = &newBackendPools
newVMSS := compute.VirtualMachineScaleSet{
Sku: vmss.Sku,
Location: vmss.Location,
VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{
VirtualMachineProfile: &compute.VirtualMachineScaleSetVMProfile{
NetworkProfile: &compute.VirtualMachineScaleSetNetworkProfile{
NetworkInterfaceConfigurations: &vmssNIC,
},
},
},
},
}
}
klog.V(2).Infof("ensureBackendPoolDeletedFromVMSS begins to update vmss(%s) with backendPoolID %s", vmssName, backendPoolID)
rerr := ss.CreateOrUpdateVMSS(ss.ResourceGroup, vmssName, newVMSS)
if rerr != nil {
klog.Errorf("ensureBackendPoolDeletedFromVMSS CreateOrUpdateVMSS(%s) with new backendPoolID %s, err: %v", vmssName, backendPoolID, err)
return rerr.Error()
}
klog.V(2).Infof("ensureBackendPoolDeletedFromVMSS begins to update vmss(%s) with backendPoolID %s", vmssName, backendPoolID)
rerr := ss.CreateOrUpdateVMSS(ss.ResourceGroup, vmssName, newVMSS)
if rerr != nil {
klog.Errorf("ensureBackendPoolDeletedFromVMSS CreateOrUpdateVMSS(%s) with new backendPoolID %s, err: %v", vmssName, backendPoolID, rerr)
return rerr.Error()
}
return nil
})
}
errs := utilerrors.AggregateGoroutines(vmssUpdaters...)
if errs != nil {
return utilerrors.Flatten(errs)
}
// Fail if there are other errors.
if len(errors) > 0 {
return utilerrors.Flatten(utilerrors.NewAggregate(errors))
}
return nil
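
The fan-out introduced above, reduced to its shape: collect one closure per scale set, run them concurrently with utilerrors.AggregateGoroutines, and flatten whatever subset failed into a single error. updateAll is an illustrative wrapper, not the real method:

package main

import (
	"fmt"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

// updateAll runs update(name) for every name in parallel and aggregates errors.
func updateAll(names []string, update func(string) error) error {
	updaters := make([]func() error, 0, len(names))
	for _, name := range names {
		name := name // capture per iteration, as the hunk does with vmssName
		updaters = append(updaters, func() error { return update(name) })
	}
	return utilerrors.Flatten(utilerrors.AggregateGoroutines(updaters...))
}

func main() {
	err := updateAll([]string{"vmss-a", "vmss-b"}, func(name string) error {
		if name == "vmss-b" {
			return fmt.Errorf("update %s failed", name)
		}
		return nil
	})
	fmt.Println(err) // only vmss-b's failure surfaces
}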


@ -75,6 +75,7 @@ go_library(
"//vendor/github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/filter:go_default_library",
"//vendor/github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta:go_default_library",
"//vendor/github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/mock:go_default_library",
"//vendor/github.com/google/go-cmp/cmp:go_default_library",
"//vendor/golang.org/x/oauth2:go_default_library",
"//vendor/golang.org/x/oauth2/google:go_default_library",
"//vendor/google.golang.org/api/compute/v0.alpha:go_default_library",


@ -28,6 +28,7 @@ import (
"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud"
"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta"
"github.com/google/go-cmp/cmp"
compute "google.golang.org/api/compute/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
@ -206,7 +207,8 @@ func (g *Cloud) ensureInternalLoadBalancer(clusterName, clusterID string, svc *v
// Delete existing forwarding rule before making changes to the backend service. For example - changing protocol
// of backend service without first deleting forwarding rule will throw an error since the linked forwarding
// rule would show the old protocol.
klog.V(2).Infof("ensureInternalLoadBalancer(%v): deleting existing forwarding rule with IP address %v", loadBalancerName, existingFwdRule.IPAddress)
frDiff := cmp.Diff(existingFwdRule, newFwdRule)
klog.V(2).Infof("ensureInternalLoadBalancer(%v): forwarding rule changed - Existing - %+v\n, New - %+v\n, Diff(-existing, +new) - %s\n. Deleting existing forwarding rule.", loadBalancerName, existingFwdRule, newFwdRule, frDiff)
if err = ignoreNotFound(g.DeleteRegionForwardingRule(loadBalancerName, g.region)); err != nil {
return nil, err
}
@ -220,7 +222,8 @@ func (g *Cloud) ensureInternalLoadBalancer(clusterName, clusterID string, svc *v
}
if fwdRuleDeleted || existingFwdRule == nil {
if err := g.ensureInternalForwardingRule(existingFwdRule, newFwdRule); err != nil {
// existing rule has been deleted, pass in nil
if err := g.ensureInternalForwardingRule(nil, newFwdRule); err != nil {
return nil, err
}
}
@ -978,11 +981,20 @@ func (g *Cloud) ensureInternalForwardingRule(existingFwdRule, newFwdRule *comput
}
func forwardingRulesEqual(old, new *compute.ForwardingRule) bool {
// basepath could have differences like compute.googleapis.com vs www.googleapis.com, compare resourceIDs
oldResourceID, err := cloud.ParseResourceURL(old.BackendService)
if err != nil {
klog.Errorf("forwardingRulesEqual(): failed to parse backend resource URL from existing FR, err - %v", err)
}
newResourceID, err := cloud.ParseResourceURL(new.BackendService)
if err != nil {
klog.Errorf("forwardingRulesEqual(): failed to parse resource URL from new FR, err - %v", err)
}
return (old.IPAddress == "" || new.IPAddress == "" || old.IPAddress == new.IPAddress) &&
old.IPProtocol == new.IPProtocol &&
old.LoadBalancingScheme == new.LoadBalancingScheme &&
equalStringSets(old.Ports, new.Ports) &&
old.BackendService == new.BackendService &&
oldResourceID.Equal(newResourceID) &&
old.AllowGlobalAccess == new.AllowGlobalAccess &&
old.Subnetwork == new.Subnetwork
}
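
Why the change above parses resource IDs instead of comparing strings: the same GCE backend service can be spelled with different API base paths (www.googleapis.com vs compute.googleapis.com). A hedged sketch standing in for cloud.ParseResourceURL and Equal, comparing only the resource path:

package main

import (
	"fmt"
	"net/url"
)

// sameResource treats two resource URLs as equal when their paths match,
// ignoring host/basepath differences; the real code compares parsed ResourceIDs.
func sameResource(a, b string) bool {
	pathOf := func(s string) string {
		if u, err := url.Parse(s); err == nil {
			return u.Path
		}
		return s
	}
	return pathOf(a) == pathOf(b)
}

func main() {
	oldURL := "https://www.googleapis.com/compute/v1/projects/p/regions/r/backendServices/bs"
	newURL := "https://compute.googleapis.com/compute/v1/projects/p/regions/r/backendServices/bs"
	fmt.Println(sameResource(oldURL, newURL)) // true despite differing base paths
}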

vendor/modules.txt

@ -544,7 +544,7 @@ github.com/golang/protobuf/ptypes/timestamp
github.com/golang/protobuf/ptypes/wrappers
# github.com/google/btree v1.0.0
github.com/google/btree
# github.com/google/cadvisor v0.38.6
# github.com/google/cadvisor v0.38.7
github.com/google/cadvisor/accelerators
github.com/google/cadvisor/cache/memory
github.com/google/cadvisor/collector
@ -1337,7 +1337,7 @@ gopkg.in/warnings.v0
gopkg.in/yaml.v2
# gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776
gopkg.in/yaml.v3
# k8s.io/api v0.19.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.20.2-k3s1
# k8s.io/api v0.19.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.20.4-k3s1
## explicit
k8s.io/api/admission/v1
k8s.io/api/admission/v1beta1
@ -1385,7 +1385,7 @@ k8s.io/api/scheduling/v1beta1
k8s.io/api/storage/v1
k8s.io/api/storage/v1alpha1
k8s.io/api/storage/v1beta1
# k8s.io/apiextensions-apiserver v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.20.2-k3s1
# k8s.io/apiextensions-apiserver v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.20.4-k3s1
k8s.io/apiextensions-apiserver/pkg/apihelpers
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install
@ -1425,7 +1425,7 @@ k8s.io/apiextensions-apiserver/pkg/generated/openapi
k8s.io/apiextensions-apiserver/pkg/registry/customresource
k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor
k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition
# k8s.io/apimachinery v0.19.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.20.2-k3s1
# k8s.io/apimachinery v0.19.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.20.4-k3s1
## explicit
k8s.io/apimachinery/pkg/api/equality
k8s.io/apimachinery/pkg/api/errors
@ -1489,7 +1489,7 @@ k8s.io/apimachinery/pkg/watch
k8s.io/apimachinery/third_party/forked/golang/json
k8s.io/apimachinery/third_party/forked/golang/netutil
k8s.io/apimachinery/third_party/forked/golang/reflect
# k8s.io/apiserver v0.19.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.20.2-k3s1
# k8s.io/apiserver v0.19.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.20.4-k3s1
## explicit
k8s.io/apiserver/pkg/admission
k8s.io/apiserver/pkg/admission/configuration
@ -1628,7 +1628,7 @@ k8s.io/apiserver/plugin/pkg/audit/webhook
k8s.io/apiserver/plugin/pkg/authenticator/token/oidc
k8s.io/apiserver/plugin/pkg/authenticator/token/webhook
k8s.io/apiserver/plugin/pkg/authorizer/webhook
# k8s.io/cli-runtime v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.20.2-k3s1
# k8s.io/cli-runtime v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.20.4-k3s1
k8s.io/cli-runtime/pkg/genericclioptions
k8s.io/cli-runtime/pkg/kustomize
k8s.io/cli-runtime/pkg/kustomize/k8sdeps
@ -1641,7 +1641,7 @@ k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch
k8s.io/cli-runtime/pkg/kustomize/k8sdeps/validator
k8s.io/cli-runtime/pkg/printers
k8s.io/cli-runtime/pkg/resource
# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.20.2-k3s1
# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.20.4-k3s1
## explicit
k8s.io/client-go/discovery
k8s.io/client-go/discovery/cached
@ -1888,7 +1888,7 @@ k8s.io/client-go/util/jsonpath
k8s.io/client-go/util/keyutil
k8s.io/client-go/util/retry
k8s.io/client-go/util/workqueue
# k8s.io/cloud-provider v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.20.2-k3s1
# k8s.io/cloud-provider v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.20.4-k3s1
## explicit
k8s.io/cloud-provider
k8s.io/cloud-provider/api
@ -1909,13 +1909,13 @@ k8s.io/cloud-provider/service/helpers
k8s.io/cloud-provider/volume
k8s.io/cloud-provider/volume/errors
k8s.io/cloud-provider/volume/helpers
# k8s.io/cluster-bootstrap v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.20.2-k3s1
# k8s.io/cluster-bootstrap v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.20.4-k3s1
k8s.io/cluster-bootstrap/token/api
k8s.io/cluster-bootstrap/token/jws
k8s.io/cluster-bootstrap/token/util
k8s.io/cluster-bootstrap/util/secrets
k8s.io/cluster-bootstrap/util/tokens
# k8s.io/code-generator v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.20.2-k3s1
# k8s.io/code-generator v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.20.4-k3s1
k8s.io/code-generator/cmd/client-gen/args
k8s.io/code-generator/cmd/client-gen/generators
k8s.io/code-generator/cmd/client-gen/generators/fake
@ -1930,7 +1930,7 @@ k8s.io/code-generator/cmd/lister-gen/args
k8s.io/code-generator/cmd/lister-gen/generators
k8s.io/code-generator/pkg/namer
k8s.io/code-generator/pkg/util
# k8s.io/component-base v0.19.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.20.2-k3s1
# k8s.io/component-base v0.19.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.20.4-k3s1
## explicit
k8s.io/component-base/cli/flag
k8s.io/component-base/cli/globalflag
@ -1956,13 +1956,13 @@ k8s.io/component-base/metrics/testutil
k8s.io/component-base/term
k8s.io/component-base/version
k8s.io/component-base/version/verflag
# k8s.io/component-helpers v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.20.2-k3s1
# k8s.io/component-helpers v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.20.4-k3s1
k8s.io/component-helpers/apimachinery/lease
k8s.io/component-helpers/auth/rbac/reconciliation
k8s.io/component-helpers/auth/rbac/validation
k8s.io/component-helpers/scheduling/corev1
k8s.io/component-helpers/scheduling/corev1/nodeaffinity
# k8s.io/controller-manager v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.20.2-k3s1
# k8s.io/controller-manager v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.20.4-k3s1
## explicit
k8s.io/controller-manager/app
k8s.io/controller-manager/config
@ -1972,11 +1972,11 @@ k8s.io/controller-manager/pkg/clientbuilder
k8s.io/controller-manager/pkg/features
k8s.io/controller-manager/pkg/features/register
k8s.io/controller-manager/pkg/informerfactory
# k8s.io/cri-api v0.19.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.20.2-k3s1
# k8s.io/cri-api v0.19.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.20.4-k3s1
## explicit
k8s.io/cri-api/pkg/apis
k8s.io/cri-api/pkg/apis/runtime/v1alpha2
# k8s.io/csi-translation-lib v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.20.2-k3s1
# k8s.io/csi-translation-lib v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.20.4-k3s1
k8s.io/csi-translation-lib
k8s.io/csi-translation-lib/plugins
# k8s.io/gengo v0.0.0-20201113003025-83324d819ded
@ -1994,7 +1994,7 @@ k8s.io/heapster/metrics/api/v1/types
k8s.io/klog
# k8s.io/klog/v2 v2.4.0
k8s.io/klog/v2
# k8s.io/kube-aggregator v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.20.2-k3s1
# k8s.io/kube-aggregator v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.20.4-k3s1
k8s.io/kube-aggregator/pkg/apis/apiregistration
k8s.io/kube-aggregator/pkg/apis/apiregistration/install
k8s.io/kube-aggregator/pkg/apis/apiregistration/v1
@ -2022,7 +2022,7 @@ k8s.io/kube-aggregator/pkg/controllers/status
k8s.io/kube-aggregator/pkg/registry/apiservice
k8s.io/kube-aggregator/pkg/registry/apiservice/etcd
k8s.io/kube-aggregator/pkg/registry/apiservice/rest
# k8s.io/kube-controller-manager v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.20.2-k3s1
# k8s.io/kube-controller-manager v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.20.4-k3s1
k8s.io/kube-controller-manager/config/v1alpha1
# k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd
k8s.io/kube-openapi/pkg/aggregator
@ -2038,13 +2038,13 @@ k8s.io/kube-openapi/pkg/validation/spec
k8s.io/kube-openapi/pkg/validation/strfmt
k8s.io/kube-openapi/pkg/validation/strfmt/bson
k8s.io/kube-openapi/pkg/validation/validate
# k8s.io/kube-proxy v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.20.2-k3s1
# k8s.io/kube-proxy v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.20.4-k3s1
k8s.io/kube-proxy/config/v1alpha1
# k8s.io/kube-scheduler v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.20.2-k3s1
# k8s.io/kube-scheduler v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.20.4-k3s1
k8s.io/kube-scheduler/config/v1
k8s.io/kube-scheduler/config/v1beta1
k8s.io/kube-scheduler/extender/v1
# k8s.io/kubectl v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.20.2-k3s1
# k8s.io/kubectl v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.20.4-k3s1
## explicit
k8s.io/kubectl/pkg/apps
k8s.io/kubectl/pkg/cmd
@ -2122,7 +2122,7 @@ k8s.io/kubectl/pkg/util/storage
k8s.io/kubectl/pkg/util/templates
k8s.io/kubectl/pkg/util/term
k8s.io/kubectl/pkg/validation
# k8s.io/kubelet v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.20.2-k3s1
# k8s.io/kubelet v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.20.4-k3s1
k8s.io/kubelet/config/v1alpha1
k8s.io/kubelet/config/v1beta1
k8s.io/kubelet/pkg/apis/credentialprovider
@ -2133,7 +2133,7 @@ k8s.io/kubelet/pkg/apis/pluginregistration/v1
k8s.io/kubelet/pkg/apis/podresources/v1
k8s.io/kubelet/pkg/apis/podresources/v1alpha1
k8s.io/kubelet/pkg/apis/stats/v1alpha1
# k8s.io/kubernetes v1.20.2 => github.com/k3s-io/kubernetes v1.20.2-k3s1
# k8s.io/kubernetes v1.20.4 => github.com/k3s-io/kubernetes v1.20.4-k3s1
## explicit
k8s.io/kubernetes/cmd/kube-apiserver/app
k8s.io/kubernetes/cmd/kube-apiserver/app/options
@ -2853,7 +2853,7 @@ k8s.io/kubernetes/third_party/forked/gonum/graph
k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear
k8s.io/kubernetes/third_party/forked/gonum/graph/simple
k8s.io/kubernetes/third_party/forked/gonum/graph/traverse
# k8s.io/legacy-cloud-providers v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.20.2-k3s1
# k8s.io/legacy-cloud-providers v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.20.4-k3s1
k8s.io/legacy-cloud-providers/aws
k8s.io/legacy-cloud-providers/azure
k8s.io/legacy-cloud-providers/azure/auth
@ -2895,7 +2895,7 @@ k8s.io/legacy-cloud-providers/openstack
k8s.io/legacy-cloud-providers/vsphere
k8s.io/legacy-cloud-providers/vsphere/vclib
k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers
# k8s.io/metrics v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.20.2-k3s1
# k8s.io/metrics v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.20.4-k3s1
k8s.io/metrics/pkg/apis/custom_metrics
k8s.io/metrics/pkg/apis/custom_metrics/v1beta1
k8s.io/metrics/pkg/apis/custom_metrics/v1beta2
@ -2911,7 +2911,7 @@ k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1
k8s.io/metrics/pkg/client/custom_metrics
k8s.io/metrics/pkg/client/custom_metrics/scheme
k8s.io/metrics/pkg/client/external_metrics
# k8s.io/mount-utils v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.20.2-k3s1
# k8s.io/mount-utils v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.20.4-k3s1
k8s.io/mount-utils
# k8s.io/utils v0.0.0-20201110183641-67b214c5f920
k8s.io/utils/buffer
@ -2993,32 +2993,32 @@ sigs.k8s.io/yaml
# google.golang.org/genproto => google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63
# google.golang.org/grpc => google.golang.org/grpc v1.27.1
# gopkg.in/square/go-jose.v2 => gopkg.in/square/go-jose.v2 v2.2.2
# k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.20.2-k3s1
# k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.20.2-k3s1
# k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.20.2-k3s1
# k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.20.2-k3s1
# k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.20.2-k3s1
# k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.20.2-k3s1
# k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.20.2-k3s1
# k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.20.2-k3s1
# k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.20.2-k3s1
# k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.20.2-k3s1
# k8s.io/component-helpers => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.20.2-k3s1
# k8s.io/controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.20.2-k3s1
# k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.20.2-k3s1
# k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.20.2-k3s1
# k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.20.2-k3s1
# k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.20.2-k3s1
# k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.20.2-k3s1
# k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.20.2-k3s1
# k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.20.2-k3s1
# k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.20.2-k3s1
# k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.20.2-k3s1
# k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.20.2-k3s1
# k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.20.2-k3s1
# k8s.io/mount-utils => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.20.2-k3s1
# k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.20.2-k3s1
# k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.20.2-k3s1
# k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.20.2-k3s1
# k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.20.2-k3s1
# k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.20.4-k3s1
# k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.20.4-k3s1
# k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.20.4-k3s1
# k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.20.4-k3s1
# k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.20.4-k3s1
# k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.20.4-k3s1
# k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.20.4-k3s1
# k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.20.4-k3s1
# k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.20.4-k3s1
# k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.20.4-k3s1
# k8s.io/component-helpers => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.20.4-k3s1
# k8s.io/controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.20.4-k3s1
# k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.20.4-k3s1
# k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.20.4-k3s1
# k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.20.4-k3s1
# k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.20.4-k3s1
# k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.20.4-k3s1
# k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.20.4-k3s1
# k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.20.4-k3s1
# k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.20.4-k3s1
# k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.20.4-k3s1
# k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.20.4-k3s1
# k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.20.4-k3s1
# k8s.io/mount-utils => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.20.4-k3s1
# k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.20.4-k3s1
# k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.20.4-k3s1
# k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.20.4-k3s1
# k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.20.4-k3s1
# mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34