Update k8s to v1.17.3

Erik Wilson 2020-02-13 17:18:16 -07:00
parent cdab19b09a
commit d86446d737
38 changed files with 777 additions and 310 deletions

go.mod

@@ -31,31 +31,31 @@ replace (
github.com/prometheus/client_model => github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910
github.com/prometheus/common => github.com/prometheus/common v0.0.0-20181126121408-4724e9255275
github.com/prometheus/procfs => github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a
k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.17.2-k3s1
k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.17.2-k3s1
k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.17.2-k3s1
k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.17.2-k3s1
k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.17.2-k3s1
k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.17.2-k3s1
k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.17.2-k3s1
k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.17.2-k3s1
k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.17.2-k3s1
k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.17.2-k3s1
k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.17.2-k3s1
k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.17.2-k3s1
k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.17.2-k3s1
k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.17.2-k3s1
k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.17.2-k3s1
k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.17.2-k3s1
k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.17.2-k3s1
k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.17.2-k3s1
k8s.io/kubernetes => github.com/rancher/kubernetes v1.17.2-k3s1
k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.17.2-k3s1
k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.17.2-k3s1
k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.17.2-k3s1
k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.17.2-k3s1
k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.17.2-k3s1
k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.17.2-k3s1
k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.17.3-k3s1
k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.17.3-k3s1
k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.17.3-k3s1
k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.17.3-k3s1
k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.17.3-k3s1
k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.17.3-k3s1
k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.17.3-k3s1
k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.17.3-k3s1
k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.17.3-k3s1
k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.17.3-k3s1
k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.17.3-k3s1
k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.17.3-k3s1
k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.17.3-k3s1
k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.17.3-k3s1
k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.17.3-k3s1
k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.17.3-k3s1
k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.17.3-k3s1
k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.17.3-k3s1
k8s.io/kubernetes => github.com/rancher/kubernetes v1.17.3-k3s1
k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.17.3-k3s1
k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.17.3-k3s1
k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.17.3-k3s1
k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.17.3-k3s1
k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.17.3-k3s1
k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.17.3-k3s1
mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34
)
@@ -115,7 +115,7 @@ require (
google.golang.org/grpc v1.25.1
gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 // indirect
gopkg.in/robfig/cron.v2 v2.0.0-20150107220207-be2e0b0deed5 // indirect
gopkg.in/yaml.v2 v2.2.7
gopkg.in/yaml.v2 v2.2.8
k8s.io/api v0.17.0
k8s.io/apimachinery v0.17.0
k8s.io/apiserver v0.0.0

go.sum

@@ -722,49 +722,49 @@ github.com/rancher/helm-controller v0.4.1 h1:AbI3zXy2Qtw4e4A6IkxKozgnm6T3KKnUo6E
github.com/rancher/helm-controller v0.4.1/go.mod h1:194LHuZRrxcD82bG1rJtOWsw98U4JbPhDWqvL7l3PAw=
github.com/rancher/kine v0.3.4 h1:06QZ06zXCOoh1Vp/eT3eoOG9isIupoo8XJe0TvfK2Tw=
github.com/rancher/kine v0.3.4/go.mod h1:xEMl0tLCva9/9me7mXJ3m9Vo6yqHgC4OU3NiK4CPrGQ=
github.com/rancher/kubernetes v1.17.2-k3s1 h1:2Gp1NFkq6wyx5NqWFI+bjkAqbDT84jB8DUYVpGnAuW8=
github.com/rancher/kubernetes v1.17.2-k3s1/go.mod h1:NbNV+69yL3eKiKDJ+ZEjqOplN3BFXKBeunzkoOy8WLo=
github.com/rancher/kubernetes/staging/src/k8s.io/api v1.17.2-k3s1 h1:AuvaLsLh6T25e8wkpIjZLxj00rVsDqLKzLokMujXQ90=
github.com/rancher/kubernetes/staging/src/k8s.io/api v1.17.2-k3s1/go.mod h1:D7sf1F7GAHEK5DOn11dKP5J/QJvUShU2AopXe/AXghk=
github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.17.2-k3s1 h1:XX2ScJkAneXI9Ei0vtWzxAbg4ft9+nnQyIoeveS8w90=
github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.17.2-k3s1/go.mod h1:HjqmpMjOO0RGrZVKCmHhQNgz6nzkzcEQ+TEIwzp14A0=
github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.17.2-k3s1 h1:kCEPvXhgbje1xcoaeqFdvKR1tn0s/bcSCKwyy5A1I/w=
github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.17.2-k3s1/go.mod h1:1WXjse1VhndJdimFoqVzt1J+RlmhvGxU8CMVWuAuV4k=
github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.17.2-k3s1 h1:plcr57VVwXMJeD6mI/x3tm8y6NFCYrjOcAMIhontQKg=
github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.17.2-k3s1/go.mod h1:3xmck1xz/FJcHnFUOjon3VC1HCe6TMMBIH8VSEuzcvM=
github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.17.2-k3s1 h1:0PiqlpsCWItOrvm/06rP9KJSqqPEYUJC8Hqst9r2ICE=
github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.17.2-k3s1/go.mod h1:2j76SpnTiazjWbdidJY0tDtSLe6k0tY2v3vV9TLV6Cg=
github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.17.2-k3s1 h1:APDoRaIEzJ+MKc64nZ/taBamrYCRj9nsRyCtClpbGr4=
github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.17.2-k3s1/go.mod h1:2yNkwZhzqhHeXaY0SR4OH76H3qwXdjDsZCySPrW9LjI=
github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.17.2-k3s1 h1:OHMLJt51ubhS8vfI6renLBz0m9CesQ5k1FA8DwK8r1Q=
github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.17.2-k3s1/go.mod h1:ycFddVBapK98UCwexOuDsvQLD3tfiVdRlXm2jjNP14c=
github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.17.2-k3s1 h1:0Pf7/Wmqs1LZlcpcsJqYoLMRqY6pwomDAIdvg0F5+Zw=
github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.17.2-k3s1/go.mod h1:Y9cWlhZVXTIx8hPYp4KajB9oNl1dBbba6LJndYpVnQo=
github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.17.2-k3s1 h1:UYT2j6qy1v+r3ZbZ/S8zk2zIaGVIWATpC7pBNLPNR08=
github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.17.2-k3s1/go.mod h1:2Gsj2VJhAB6dTcnR+841H51LUCyhufQPSzTQWRnevwY=
github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.17.2-k3s1 h1:DDqc894cAoo/Sozd4DiQEub9RtDm+5pFLkATTjdcTW4=
github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.17.2-k3s1/go.mod h1:cTBtMr/eUcihQ4sFhgiDPlmlFpn1xBY37yQ+zNcT1IE=
github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.17.2-k3s1 h1:Qt0G43lbm7dSo4wWqJIIl4T4bSIp8wViYga0gnEtp5w=
github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.17.2-k3s1/go.mod h1:za6HqWgDNrKSvaq+Zg8GwyNeZifm/H9eMXr7yLJ+UdA=
github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.17.2-k3s1 h1:sBaMw7zGRqFJny6R7Txb5HsJi8wZAOwkhpV4b0ybcUg=
github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.17.2-k3s1/go.mod h1:LO9jViQKnx/FqMjlXE9SmKjZ3I4PE/SLsaDWKTlmiRw=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.17.2-k3s1 h1:atWWD3UQlheciunS/Ht4tgCOThVLHqiIe1Vgw5jK9OE=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.17.2-k3s1/go.mod h1:vh3GqAHrJU/N5vuEaKFmWYb78D7L6fiVBS4Id3OyDSc=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.17.2-k3s1 h1:6BEHsakvfeC9GiZQLkpXBg/KpONmZO66Xjl8nRa3hjk=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.17.2-k3s1/go.mod h1:mKHow8zS1GpDcLHlAgghj2BKPOZrpEIHA30lWyRF3gg=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.17.2-k3s1 h1:3/HoXdSPqx5pDojftwEZoKfnk12GTqpFcZ4aibTq7p8=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.17.2-k3s1/go.mod h1:6SB+e2ragxZ8CPWrgNZ7F9KkCFykx5zQjnLVP2SWQ00=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.17.2-k3s1 h1:alhteShN7ILI08LIqZ87SjD3cMkhelh843m8vmXaOG0=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.17.2-k3s1/go.mod h1:8rHmgUeQCsvaVrSy79lQc3DKD5PQp9cJH0DLw2GSAdk=
github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.17.2-k3s1 h1:CFJho6IWJupty3Am2EYFTF199B19aIxrkrk9ulAcApQ=
github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.17.2-k3s1/go.mod h1:xacTENgh5ed/fa8fjur271/livetYIFI+P81WK+Ez9s=
github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.17.2-k3s1 h1:EhSvJWg2B1TLSGl7ToxVeVtSWRNgLd6pbmfyghuGrtI=
github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.17.2-k3s1/go.mod h1:oHq6KV/WjiamPxKs2Np7JxcOEwHXfMybRBnLrMsTOHs=
github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.17.2-k3s1 h1:21Yt7bYZEIhgSy+78bnGub1qqqo9MnqodZjZ2DmVpro=
github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.17.2-k3s1/go.mod h1:GTPm6pl8yIFEzk7bX03iY6kI5Bpt0Qd+BN3ajg/qeUI=
github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.17.2-k3s1 h1:u0LtqHRJCsCiMQ29lLtOu/tlAXUZqA8g9opF7Kh3mNc=
github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.17.2-k3s1/go.mod h1:e70DTGI+y72YcU8iGDifilyP5uW6sPmBflDEjtiH/p0=
github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.17.2-k3s1/go.mod h1:Ey49UYt1h2yQl0P61nfZEo/LZUu78Bj55oBL/VOejW4=
github.com/rancher/kubernetes v1.17.3-k3s1 h1:E69XHLnUlbv+oWwkQcGTlmnjmiXMgUWCf/ebGHwd698=
github.com/rancher/kubernetes v1.17.3-k3s1/go.mod h1:gt28rfzaskIzJ8d82TSJmGrJ0XZD0BBy8TcQvTuCI3w=
github.com/rancher/kubernetes/staging/src/k8s.io/api v1.17.3-k3s1 h1:AO6Y/i02FwusnVYa13pceiC5NGNhxl483PxECLW7mLs=
github.com/rancher/kubernetes/staging/src/k8s.io/api v1.17.3-k3s1/go.mod h1:D7sf1F7GAHEK5DOn11dKP5J/QJvUShU2AopXe/AXghk=
github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.17.3-k3s1 h1:BpvB6cEmYqLg6KHyswr31b6008Lg/mKOONI+hBiCbcI=
github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.17.3-k3s1/go.mod h1:h8Z8vCtlVWoVWwGDsmirREYT5Ot3kRyY62zQ/L18DR4=
github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.17.3-k3s1 h1:yXaJWY5HGZf9IAgShUY3oGOlg8fJjBwoek0Z7o8VgXs=
github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.17.3-k3s1/go.mod h1:aSKJ8oirsor4diiLHY6P9ygjPwLQXdR3awaqBbnJX/c=
github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.17.3-k3s1 h1:JZ6fznSyRCJpV8mKGuaR+mfQtlt9COwdN2cc+rsKbR4=
github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.17.3-k3s1/go.mod h1:Hj5c4LCEaX6W7RzDS4pCxUO/wjX3RqmhNLeb5rba/qs=
github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.17.3-k3s1 h1:BPOr+PbUvCdwkZ+eDbs5sx7zSCsONC2utIgpM3EijRk=
github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.17.3-k3s1/go.mod h1:2j76SpnTiazjWbdidJY0tDtSLe6k0tY2v3vV9TLV6Cg=
github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.17.3-k3s1 h1:cQDLmPcSfKpEILv0aaGE4NvVBR/uWMF8FHIXogAg6XI=
github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.17.3-k3s1/go.mod h1:2yNkwZhzqhHeXaY0SR4OH76H3qwXdjDsZCySPrW9LjI=
github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.17.3-k3s1 h1:dSJqd0PtxZ8TV1AasBMLXi5i8/vN5VPpcz99dIwpiMQ=
github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.17.3-k3s1/go.mod h1:ycFddVBapK98UCwexOuDsvQLD3tfiVdRlXm2jjNP14c=
github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.17.3-k3s1 h1:JWxmZkgs5XKaevw7z6G+mqB3HCqrTNebeI9k6NBGd8w=
github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.17.3-k3s1/go.mod h1:Y9cWlhZVXTIx8hPYp4KajB9oNl1dBbba6LJndYpVnQo=
github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.17.3-k3s1 h1:orNieXSkl5FRE4gf+7TawIo4Pq5kTCJCjGnmng7nuHA=
github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.17.3-k3s1/go.mod h1:k2c6AL2BX9sZq1YlrdnWj8UOSzlYNGXyompiqK2qt6k=
github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.17.3-k3s1 h1:4VzaYIx2VnIOP+WMNHmkvInWRO3EOBhTY2PcUAr9S6A=
github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.17.3-k3s1/go.mod h1:cTBtMr/eUcihQ4sFhgiDPlmlFpn1xBY37yQ+zNcT1IE=
github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.17.3-k3s1 h1:6nMZhXX+/Rh/yIf9ctWfpYBdEVl3ZNivW/7VHxouwrU=
github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.17.3-k3s1/go.mod h1:MFtntPG6S7PSTqf06WHsjhdZ/gOTxfzFgNEzfhO5Uus=
github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.17.3-k3s1 h1:ZVPaisklQHWxTOIgn6/TWNJMt+MwJNaM3QgSsZiBtck=
github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.17.3-k3s1/go.mod h1:LO9jViQKnx/FqMjlXE9SmKjZ3I4PE/SLsaDWKTlmiRw=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.17.3-k3s1 h1:58FYIMYw7e7BFMkQPuY98qHkTmhABiHR+cFdN0Eum5c=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.17.3-k3s1/go.mod h1:vh3GqAHrJU/N5vuEaKFmWYb78D7L6fiVBS4Id3OyDSc=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.17.3-k3s1 h1:ys+gYp5NgAcnhGK48l/NGK341RlHYT7Kh+LXkPh3v5Y=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.17.3-k3s1/go.mod h1:mKHow8zS1GpDcLHlAgghj2BKPOZrpEIHA30lWyRF3gg=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.17.3-k3s1 h1:eQ1/Hy/SkZfstYmIcTB9viEVZikzSVkZ2q00hqbN4Io=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.17.3-k3s1/go.mod h1:6SB+e2ragxZ8CPWrgNZ7F9KkCFykx5zQjnLVP2SWQ00=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.17.3-k3s1 h1:VqpDOfnHiW312gngcNyOlDXr5dAE6fn2O0IWnly+O3Y=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.17.3-k3s1/go.mod h1:8rHmgUeQCsvaVrSy79lQc3DKD5PQp9cJH0DLw2GSAdk=
github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.17.3-k3s1 h1:BzcJiG1Ixn83uYb0Iyq6/3ucYlw0/3ydPMmJtIt8sr4=
github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.17.3-k3s1/go.mod h1:1f5qLii1ERhwADRUUxPM961i2GpaOJAMXHBcLAEhrdE=
github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.17.3-k3s1 h1:NrlhWPZZQGlgO+MurYqm6WbmgVT9r225rEmxpQ3EvjQ=
github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.17.3-k3s1/go.mod h1:oHq6KV/WjiamPxKs2Np7JxcOEwHXfMybRBnLrMsTOHs=
github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.17.3-k3s1 h1:RuF2J4k5GFGt5nx+jJzXjG2mkrbfwEgQAruzEPI+pJY=
github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.17.3-k3s1/go.mod h1:GTPm6pl8yIFEzk7bX03iY6kI5Bpt0Qd+BN3ajg/qeUI=
github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.17.3-k3s1 h1:srUq7MCIY9eiC363R7gpGIUQtlQU4xDjtL1AMcWifmc=
github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.17.3-k3s1/go.mod h1:e70DTGI+y72YcU8iGDifilyP5uW6sPmBflDEjtiH/p0=
github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.17.3-k3s1/go.mod h1:Ey49UYt1h2yQl0P61nfZEo/LZUu78Bj55oBL/VOejW4=
github.com/rancher/moq v0.0.0-20190404221404-ee5226d43009/go.mod h1:wpITyDPTi/Na5h73XkbuEf2AP9fbgrIGqqxVzFhYD6U=
github.com/rancher/remotedialer v0.2.0 h1:xD7t3K6JYwTdAsxmGtTHQMkEkFgKouQ1foLxVW424Dc=
github.com/rancher/remotedialer v0.2.0/go.mod h1:tkU8ZvrR5lRgaKWaX71nAy6daeqvPFx/lJEnbW7tXSI=
@@ -1149,6 +1149,8 @@ gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo=
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.1.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=

vendor/gopkg.in/yaml.v2/scannerc.go (generated, vendored)

@@ -626,32 +626,18 @@ func trace(args ...interface{}) func() {
func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
// While we need more tokens to fetch, do it.
for {
// Check if we really need to fetch more tokens.
need_more_tokens := false
if parser.tokens_head == len(parser.tokens) {
// Queue is empty.
need_more_tokens = true
} else {
// Check if any potential simple key may occupy the head position.
for i := len(parser.simple_keys) - 1; i >= 0; i-- {
simple_key := &parser.simple_keys[i]
if simple_key.token_number < parser.tokens_parsed {
break
}
if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
return false
} else if valid && simple_key.token_number == parser.tokens_parsed {
need_more_tokens = true
break
}
if parser.tokens_head != len(parser.tokens) {
// If queue is non-empty, check if any potential simple key may
// occupy the head position.
head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed]
if !ok {
break
} else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok {
return false
} else if !valid {
break
}
}
// We are finished.
if !need_more_tokens {
break
}
// Fetch the next token.
if !yaml_parser_fetch_next_token(parser) {
return false
@@ -883,6 +869,7 @@ func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
return false
}
parser.simple_keys[len(parser.simple_keys)-1] = simple_key
parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
}
return true
}
@@ -897,9 +884,10 @@ func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
"while scanning a simple key", parser.simple_keys[i].mark,
"could not find expected ':'")
}
// Remove the key from the stack.
parser.simple_keys[i].possible = false
delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
}
// Remove the key from the stack.
parser.simple_keys[i].possible = false
return true
}
@@ -930,7 +918,9 @@ func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
if parser.flow_level > 0 {
parser.flow_level--
parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
last := len(parser.simple_keys) - 1
delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
parser.simple_keys = parser.simple_keys[:last]
}
return true
}
@@ -1007,6 +997,8 @@ func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
// Initialize the simple key stack.
parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
parser.simple_keys_by_tok = make(map[int]int)
// A simple key is allowed at the beginning of the stream.
parser.simple_key_allowed = true
@@ -1310,6 +1302,7 @@ func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
// Remove the simple key.
simple_key.possible = false
delete(parser.simple_keys_by_tok, simple_key.token_number)
// A simple key cannot follow another simple key.
parser.simple_key_allowed = false
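
The change above is the heart of the yaml.v2 v2.2.8 bump: instead of scanning the whole simple_keys stack on every token, the scanner consults a simple_keys_by_tok index that is kept in sync at every save/remove/pop site, turning a potentially quadratic parse into a linear one. A minimal sketch of that stack-plus-index pattern, with illustrative names rather than the library's internals:

package main

import "fmt"

// keyStack mirrors the patched scanner's idea: a stack of pending keys plus
// an index from token number to stack position, so the "is the head token a
// pending simple key?" check is a map lookup rather than a scan.
type keyStack struct {
	keys  []int       // token numbers, in stack order
	byTok map[int]int // token number -> index into keys
}

func (s *keyStack) push(tok int) {
	s.keys = append(s.keys, tok)
	s.byTok[tok] = len(s.keys) - 1
}

func (s *keyStack) pop() {
	last := len(s.keys) - 1
	delete(s.byTok, s.keys[last]) // keep the index in sync with the stack
	s.keys = s.keys[:last]
}

func main() {
	s := &keyStack{byTok: map[int]int{}}
	s.push(7)
	if i, ok := s.byTok[7]; ok {
		fmt.Println("token 7 pending at stack index", i) // O(1), no scan
	}
	s.pop()
}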

vendor/gopkg.in/yaml.v2/yamlh.go (generated, vendored)

@@ -579,6 +579,7 @@ type yaml_parser_t struct {
simple_key_allowed bool // May a simple key occur at the current position?
simple_keys []yaml_simple_key_t // The stack of simple keys.
simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number
// Parser stuff


@@ -20,7 +20,6 @@ go_library(
deps = [
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/validation:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/endpoints/request:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/features:go_default_library",


@@ -27,11 +27,10 @@ import (
"sync"
"time"
restful "github.com/emicklei/go-restful"
"github.com/emicklei/go-restful"
"k8s.io/apimachinery/pkg/apis/meta/v1/validation"
"k8s.io/apimachinery/pkg/types"
utilnet "k8s.io/apimachinery/pkg/util/net"
utilsets "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/features"
@@ -301,7 +300,8 @@ func RecordLongRunning(req *http.Request, requestInfo *request.RequestInfo, comp
func MonitorRequest(req *http.Request, verb, group, version, resource, subresource, scope, component, contentType string, httpCode, respSize int, elapsed time.Duration) {
reportedVerb := cleanVerb(verb, req)
dryRun := cleanDryRun(req.URL)
client := cleanUserAgent(utilnet.GetHTTPClient(req))
// blank out client string here, in order to avoid cardinality issues
client := ""
elapsedMicroseconds := float64(elapsed / time.Microsecond)
elapsedSeconds := elapsed.Seconds()
requestCounter.WithLabelValues(reportedVerb, dryRun, group, version, resource, subresource, scope, component, client, contentType, codeToString(httpCode)).Inc()
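
For context on why the client label is blanked: in Prometheus, every distinct combination of label values materializes its own time series, so a label fed from the raw User-Agent header is unbounded. A hedged sketch with client_golang (a demo metric, not the apiserver's requestCounter):

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
)

// demoRequests is hypothetical; it exists only to show the cardinality
// problem the patch avoids.
var demoRequests = prometheus.NewCounterVec(
	prometheus.CounterOpts{Name: "demo_requests_total", Help: "Requests by verb and client."},
	[]string{"verb", "client"},
)

func record(req *http.Request) {
	// Labeling by the raw User-Agent mints one time series per distinct
	// client string -- unbounded cardinality under adversarial clients.
	demoRequests.WithLabelValues(req.Method, req.UserAgent()).Inc()

	// The patched MonitorRequest pins the client label to "" instead,
	// collapsing all clients into a single series.
	demoRequests.WithLabelValues(req.Method, "").Inc()
}

func main() {
	prometheus.MustRegister(demoRequests)
	record(&http.Request{
		Method: "GET",
		Header: http.Header{"User-Agent": {"kubectl/v1.17.3"}},
	})
}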


@@ -3,8 +3,8 @@ package version
var (
gitMajor = "1"
gitMinor = "17"
gitVersion = "v1.17.2-k3s1"
gitCommit = "2f70330e5ae4f8c1a275f7742a26f26bf890ae1c"
gitVersion = "v1.17.3-k3s1"
gitCommit = "4a5cd0af6fbc1ec1c32689dc45774da8957afad4"
gitTreeState = "clean"
buildDate = "2020-01-21T21:23:52Z"
buildDate = "2020-02-13T23:57:05Z"
)


@@ -98,6 +98,10 @@ func GetInstanceProviderID(ctx context.Context, cloud Interface, nodeName types.
}
instanceID, err := instances.InstanceID(ctx, nodeName)
if err != nil {
if err == NotImplemented {
return "", err
}
return "", fmt.Errorf("failed to get instance ID from cloud provider: %v", err)
}
return cloud.ProviderName() + "://" + instanceID, nil
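
The new early return exists because cloudprovider.NotImplemented is a sentinel error: once it is wrapped with fmt.Errorf, callers (such as the cloud node controller later in this commit) can no longer detect it by comparison. A small sketch of the contract, with ErrNotImplemented standing in for the real sentinel:

package main

import (
	"errors"
	"fmt"
)

// ErrNotImplemented stands in for cloudprovider.NotImplemented.
var ErrNotImplemented = errors.New("unimplemented")

func instanceID() (string, error) { return "", ErrNotImplemented }

func providerID() (string, error) {
	id, err := instanceID()
	if err != nil {
		if err == ErrNotImplemented {
			return "", err // pass the sentinel through unchanged
		}
		// wrapping any other error is fine; wrapping the sentinel would
		// break the caller's equality check
		return "", fmt.Errorf("failed to get instance ID from cloud provider: %v", err)
	}
	return "fake://" + id, nil
}

func main() {
	if _, err := providerID(); err == ErrNotImplemented {
		fmt.Println("provider has no instance IDs; fall back to node-name lookup")
	}
}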


@@ -171,8 +171,8 @@ gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=


@@ -3,8 +3,8 @@ package version
var (
gitMajor = "1"
gitMinor = "17"
gitVersion = "v1.17.2-k3s1"
gitCommit = "2f70330e5ae4f8c1a275f7742a26f26bf890ae1c"
gitVersion = "v1.17.3-k3s1"
gitCommit = "4a5cd0af6fbc1ec1c32689dc45774da8957afad4"
gitTreeState = "clean"
buildDate = "2020-01-21T21:23:52Z"
buildDate = "2020-02-13T23:57:05Z"
)


@@ -154,8 +154,8 @@ gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=


@@ -21,8 +21,10 @@ import (
"strconv"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/kubernetes/pkg/apis/apps"
"k8s.io/kubernetes/pkg/apis/core"
k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1"
)
@@ -198,3 +200,45 @@ func deepCopyStringMap(m map[string]string) map[string]string {
}
return ret
}
// Convert_v1_StatefulSetSpec_To_apps_StatefulSetSpec augments auto-conversion to preserve < 1.17 behavior
// setting apiVersion/kind in nested persistent volume claim objects.
func Convert_v1_StatefulSetSpec_To_apps_StatefulSetSpec(in *appsv1.StatefulSetSpec, out *apps.StatefulSetSpec, s conversion.Scope) error {
if err := autoConvert_v1_StatefulSetSpec_To_apps_StatefulSetSpec(in, out, s); err != nil {
return err
}
// set APIVersion/Kind to behave the same as reflective conversion < 1.17.
// see http://issue.k8s.io/87583
if out.VolumeClaimTemplates != nil {
// copy so we don't modify the input
templatesCopy := make([]core.PersistentVolumeClaim, len(out.VolumeClaimTemplates))
copy(templatesCopy, out.VolumeClaimTemplates)
out.VolumeClaimTemplates = templatesCopy
for i := range out.VolumeClaimTemplates {
out.VolumeClaimTemplates[i].APIVersion = ""
out.VolumeClaimTemplates[i].Kind = ""
}
}
return nil
}
// Convert_apps_StatefulSetSpec_To_v1_StatefulSetSpec augments auto-conversion to preserve < 1.17 behavior
// setting apiVersion/kind in nested persistent volume claim objects.
func Convert_apps_StatefulSetSpec_To_v1_StatefulSetSpec(in *apps.StatefulSetSpec, out *appsv1.StatefulSetSpec, s conversion.Scope) error {
if err := autoConvert_apps_StatefulSetSpec_To_v1_StatefulSetSpec(in, out, s); err != nil {
return err
}
// set APIVersion/Kind to behave the same as reflective conversion < 1.17.
// see http://issue.k8s.io/87583
if out.VolumeClaimTemplates != nil {
// copy so we don't modify the input
templatesCopy := make([]corev1.PersistentVolumeClaim, len(out.VolumeClaimTemplates))
copy(templatesCopy, out.VolumeClaimTemplates)
out.VolumeClaimTemplates = templatesCopy
for i := range out.VolumeClaimTemplates {
out.VolumeClaimTemplates[i].APIVersion = "v1"
out.VolumeClaimTemplates[i].Kind = "PersistentVolumeClaim"
}
}
return nil
}
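
One detail worth calling out in both conversion helpers: VolumeClaimTemplates is copied before the loop because Go slices alias their backing arrays, so mutating the output in place would also rewrite the caller's input object. A minimal illustration with a stand-in struct:

package main

import "fmt"

type pvc struct{ Kind string }

// convert shows why the conversion functions copy VolumeClaimTemplates
// before mutating: out initially shares in's backing array.
func convert(in []pvc) []pvc {
	out := in // shallow assignment: aliases in's backing array
	templatesCopy := make([]pvc, len(out))
	copy(templatesCopy, out)
	out = templatesCopy // detach before mutating
	for i := range out {
		out[i].Kind = "PersistentVolumeClaim"
	}
	return out
}

func main() {
	in := []pvc{{Kind: ""}}
	out := convert(in)
	fmt.Printf("in=%q out=%q\n", in[0].Kind, out[0].Kind) // in="" out="PersistentVolumeClaim"
}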


@@ -250,16 +250,6 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.StatefulSetSpec)(nil), (*apps.StatefulSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_StatefulSetSpec_To_apps_StatefulSetSpec(a.(*v1.StatefulSetSpec), b.(*apps.StatefulSetSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.StatefulSetSpec)(nil), (*v1.StatefulSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_StatefulSetSpec_To_v1_StatefulSetSpec(a.(*apps.StatefulSetSpec), b.(*v1.StatefulSetSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.StatefulSetStatus)(nil), (*apps.StatefulSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_StatefulSetStatus_To_apps_StatefulSetStatus(a.(*v1.StatefulSetStatus), b.(*apps.StatefulSetStatus), scope)
}); err != nil {
@@ -300,6 +290,11 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddConversionFunc((*apps.StatefulSetSpec)(nil), (*v1.StatefulSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_StatefulSetSpec_To_v1_StatefulSetSpec(a.(*apps.StatefulSetSpec), b.(*v1.StatefulSetSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*v1.DaemonSetSpec)(nil), (*apps.DaemonSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_DaemonSetSpec_To_apps_DaemonSetSpec(a.(*v1.DaemonSetSpec), b.(*apps.DaemonSetSpec), scope)
}); err != nil {
@@ -320,6 +315,11 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddConversionFunc((*v1.StatefulSetSpec)(nil), (*apps.StatefulSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_StatefulSetSpec_To_apps_StatefulSetSpec(a.(*v1.StatefulSetSpec), b.(*apps.StatefulSetSpec), scope)
}); err != nil {
return err
}
return nil
}
@@ -1155,11 +1155,6 @@ func autoConvert_v1_StatefulSetSpec_To_apps_StatefulSetSpec(in *v1.StatefulSetSp
return nil
}
// Convert_v1_StatefulSetSpec_To_apps_StatefulSetSpec is an autogenerated conversion function.
func Convert_v1_StatefulSetSpec_To_apps_StatefulSetSpec(in *v1.StatefulSetSpec, out *apps.StatefulSetSpec, s conversion.Scope) error {
return autoConvert_v1_StatefulSetSpec_To_apps_StatefulSetSpec(in, out, s)
}
func autoConvert_apps_StatefulSetSpec_To_v1_StatefulSetSpec(in *apps.StatefulSetSpec, out *v1.StatefulSetSpec, s conversion.Scope) error {
if err := metav1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil {
return err
@@ -1178,11 +1173,6 @@ func autoConvert_apps_StatefulSetSpec_To_v1_StatefulSetSpec(in *apps.StatefulSet
return nil
}
// Convert_apps_StatefulSetSpec_To_v1_StatefulSetSpec is an autogenerated conversion function.
func Convert_apps_StatefulSetSpec_To_v1_StatefulSetSpec(in *apps.StatefulSetSpec, out *v1.StatefulSetSpec, s conversion.Scope) error {
return autoConvert_apps_StatefulSetSpec_To_v1_StatefulSetSpec(in, out, s)
}
func autoConvert_v1_StatefulSetStatus_To_apps_StatefulSetStatus(in *v1.StatefulSetStatus, out *apps.StatefulSetStatus, s conversion.Scope) error {
if err := metav1.Convert_int64_To_Pointer_int64(&in.ObservedGeneration, &out.ObservedGeneration, s); err != nil {
return err


@@ -20,11 +20,14 @@ import (
"fmt"
appsv1beta1 "k8s.io/api/apps/v1beta1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
apps "k8s.io/kubernetes/pkg/apis/apps"
"k8s.io/kubernetes/pkg/apis/autoscaling"
"k8s.io/kubernetes/pkg/apis/core"
)
func addConversionFuncs(scheme *runtime.Scheme) error {
@@ -76,3 +79,45 @@ func Convert_v1beta1_ScaleStatus_To_autoscaling_ScaleStatus(in *appsv1beta1.Scal
}
return nil
}
// Convert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec augments auto-conversion to preserve < 1.17 behavior
// setting apiVersion/kind in nested persistent volume claim objects.
func Convert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec(in *appsv1beta1.StatefulSetSpec, out *apps.StatefulSetSpec, s conversion.Scope) error {
if err := autoConvert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec(in, out, s); err != nil {
return err
}
// set APIVersion/Kind to behave the same as reflective conversion < 1.17.
// see http://issue.k8s.io/87583
if out.VolumeClaimTemplates != nil {
// copy so we don't modify the input
templatesCopy := make([]core.PersistentVolumeClaim, len(out.VolumeClaimTemplates))
copy(templatesCopy, out.VolumeClaimTemplates)
out.VolumeClaimTemplates = templatesCopy
for i := range out.VolumeClaimTemplates {
out.VolumeClaimTemplates[i].APIVersion = ""
out.VolumeClaimTemplates[i].Kind = ""
}
}
return nil
}
// Convert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec augments auto-conversion to preserve < 1.17 behavior
// setting apiVersion/kind in nested persistent volume claim objects.
func Convert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(in *apps.StatefulSetSpec, out *appsv1beta1.StatefulSetSpec, s conversion.Scope) error {
if err := autoConvert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(in, out, s); err != nil {
return err
}
// set APIVersion/Kind to behave the same as reflective conversion < 1.17.
// see http://issue.k8s.io/87583
if out.VolumeClaimTemplates != nil {
// copy so we don't modify the input
templatesCopy := make([]corev1.PersistentVolumeClaim, len(out.VolumeClaimTemplates))
copy(templatesCopy, out.VolumeClaimTemplates)
out.VolumeClaimTemplates = templatesCopy
for i := range out.VolumeClaimTemplates {
out.VolumeClaimTemplates[i].APIVersion = "v1"
out.VolumeClaimTemplates[i].Kind = "PersistentVolumeClaim"
}
}
return nil
}


@@ -211,16 +211,6 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1beta1.StatefulSetSpec)(nil), (*apps.StatefulSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec(a.(*v1beta1.StatefulSetSpec), b.(*apps.StatefulSetSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.StatefulSetSpec)(nil), (*v1beta1.StatefulSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(a.(*apps.StatefulSetSpec), b.(*v1beta1.StatefulSetSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1beta1.StatefulSetStatus)(nil), (*apps.StatefulSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_StatefulSetStatus_To_apps_StatefulSetStatus(a.(*v1beta1.StatefulSetStatus), b.(*apps.StatefulSetStatus), scope)
}); err != nil {
@@ -241,6 +231,11 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddConversionFunc((*apps.StatefulSetSpec)(nil), (*v1beta1.StatefulSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(a.(*apps.StatefulSetSpec), b.(*v1beta1.StatefulSetSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscaling.ScaleStatus)(nil), (*v1beta1.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ScaleStatus_To_v1beta1_ScaleStatus(a.(*autoscaling.ScaleStatus), b.(*v1beta1.ScaleStatus), scope)
}); err != nil {
@@ -251,6 +246,11 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddConversionFunc((*v1beta1.StatefulSetSpec)(nil), (*apps.StatefulSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec(a.(*v1beta1.StatefulSetSpec), b.(*apps.StatefulSetSpec), scope)
}); err != nil {
return err
}
return nil
}
@@ -835,11 +835,6 @@ func autoConvert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec(in *v1beta1.Sta
return nil
}
// Convert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec is an autogenerated conversion function.
func Convert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec(in *v1beta1.StatefulSetSpec, out *apps.StatefulSetSpec, s conversion.Scope) error {
return autoConvert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec(in, out, s)
}
func autoConvert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(in *apps.StatefulSetSpec, out *v1beta1.StatefulSetSpec, s conversion.Scope) error {
if err := metav1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil {
return err
@@ -858,11 +853,6 @@ func autoConvert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(in *apps.Statef
return nil
}
// Convert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec is an autogenerated conversion function.
func Convert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(in *apps.StatefulSetSpec, out *v1beta1.StatefulSetSpec, s conversion.Scope) error {
return autoConvert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(in, out, s)
}
func autoConvert_v1beta1_StatefulSetStatus_To_apps_StatefulSetStatus(in *v1beta1.StatefulSetStatus, out *apps.StatefulSetStatus, s conversion.Scope) error {
out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration))
out.Replicas = in.Replicas


@@ -21,12 +21,14 @@ import (
"strconv"
appsv1beta2 "k8s.io/api/apps/v1beta2"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/apis/apps"
autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling"
"k8s.io/kubernetes/pkg/apis/core"
k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1"
)
@@ -255,3 +257,45 @@ func deepCopyStringMap(m map[string]string) map[string]string {
}
return ret
}
// Convert_v1beta2_StatefulSetSpec_To_apps_StatefulSetSpec augments auto-conversion to preserve < 1.17 behavior
// setting apiVersion/kind in nested persistent volume claim objects.
func Convert_v1beta2_StatefulSetSpec_To_apps_StatefulSetSpec(in *appsv1beta2.StatefulSetSpec, out *apps.StatefulSetSpec, s conversion.Scope) error {
if err := autoConvert_v1beta2_StatefulSetSpec_To_apps_StatefulSetSpec(in, out, s); err != nil {
return err
}
// set APIVersion/Kind to behave the same as reflective conversion < 1.17.
// see http://issue.k8s.io/87583
if out.VolumeClaimTemplates != nil {
// copy so we don't modify the input
templatesCopy := make([]core.PersistentVolumeClaim, len(out.VolumeClaimTemplates))
copy(templatesCopy, out.VolumeClaimTemplates)
out.VolumeClaimTemplates = templatesCopy
for i := range out.VolumeClaimTemplates {
out.VolumeClaimTemplates[i].APIVersion = ""
out.VolumeClaimTemplates[i].Kind = ""
}
}
return nil
}
// Convert_apps_StatefulSetSpec_To_v1beta2_StatefulSetSpec augments auto-conversion to preserve < 1.17 behavior
// setting apiVersion/kind in nested persistent volume claim objects.
func Convert_apps_StatefulSetSpec_To_v1beta2_StatefulSetSpec(in *apps.StatefulSetSpec, out *appsv1beta2.StatefulSetSpec, s conversion.Scope) error {
if err := autoConvert_apps_StatefulSetSpec_To_v1beta2_StatefulSetSpec(in, out, s); err != nil {
return err
}
// set APIVersion/Kind to behave the same as reflective conversion < 1.17.
// see http://issue.k8s.io/87583
if out.VolumeClaimTemplates != nil {
// copy so we don't modify the input
templatesCopy := make([]corev1.PersistentVolumeClaim, len(out.VolumeClaimTemplates))
copy(templatesCopy, out.VolumeClaimTemplates)
out.VolumeClaimTemplates = templatesCopy
for i := range out.VolumeClaimTemplates {
out.VolumeClaimTemplates[i].APIVersion = "v1"
out.VolumeClaimTemplates[i].Kind = "PersistentVolumeClaim"
}
}
return nil
}


@@ -271,16 +271,6 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1beta2.StatefulSetSpec)(nil), (*apps.StatefulSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_StatefulSetSpec_To_apps_StatefulSetSpec(a.(*v1beta2.StatefulSetSpec), b.(*apps.StatefulSetSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*apps.StatefulSetSpec)(nil), (*v1beta2.StatefulSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_StatefulSetSpec_To_v1beta2_StatefulSetSpec(a.(*apps.StatefulSetSpec), b.(*v1beta2.StatefulSetSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1beta2.StatefulSetStatus)(nil), (*apps.StatefulSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_StatefulSetStatus_To_apps_StatefulSetStatus(a.(*v1beta2.StatefulSetStatus), b.(*apps.StatefulSetStatus), scope)
}); err != nil {
@@ -321,6 +311,11 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddConversionFunc((*apps.StatefulSetSpec)(nil), (*v1beta2.StatefulSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_apps_StatefulSetSpec_To_v1beta2_StatefulSetSpec(a.(*apps.StatefulSetSpec), b.(*v1beta2.StatefulSetSpec), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*autoscaling.ScaleStatus)(nil), (*v1beta2.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_autoscaling_ScaleStatus_To_v1beta2_ScaleStatus(a.(*autoscaling.ScaleStatus), b.(*v1beta2.ScaleStatus), scope)
}); err != nil {
@@ -351,6 +346,11 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddConversionFunc((*v1beta2.StatefulSetSpec)(nil), (*apps.StatefulSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_StatefulSetSpec_To_apps_StatefulSetSpec(a.(*v1beta2.StatefulSetSpec), b.(*apps.StatefulSetSpec), scope)
}); err != nil {
return err
}
return nil
}
@@ -1251,11 +1251,6 @@ func autoConvert_v1beta2_StatefulSetSpec_To_apps_StatefulSetSpec(in *v1beta2.Sta
return nil
}
// Convert_v1beta2_StatefulSetSpec_To_apps_StatefulSetSpec is an autogenerated conversion function.
func Convert_v1beta2_StatefulSetSpec_To_apps_StatefulSetSpec(in *v1beta2.StatefulSetSpec, out *apps.StatefulSetSpec, s conversion.Scope) error {
return autoConvert_v1beta2_StatefulSetSpec_To_apps_StatefulSetSpec(in, out, s)
}
func autoConvert_apps_StatefulSetSpec_To_v1beta2_StatefulSetSpec(in *apps.StatefulSetSpec, out *v1beta2.StatefulSetSpec, s conversion.Scope) error {
if err := metav1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil {
return err
@@ -1274,11 +1269,6 @@ func autoConvert_apps_StatefulSetSpec_To_v1beta2_StatefulSetSpec(in *apps.Statef
return nil
}
// Convert_apps_StatefulSetSpec_To_v1beta2_StatefulSetSpec is an autogenerated conversion function.
func Convert_apps_StatefulSetSpec_To_v1beta2_StatefulSetSpec(in *apps.StatefulSetSpec, out *v1beta2.StatefulSetSpec, s conversion.Scope) error {
return autoConvert_apps_StatefulSetSpec_To_v1beta2_StatefulSetSpec(in, out, s)
}
func autoConvert_v1beta2_StatefulSetStatus_To_apps_StatefulSetStatus(in *v1beta2.StatefulSetStatus, out *apps.StatefulSetStatus, s conversion.Scope) error {
if err := metav1.Convert_int64_To_Pointer_int64(&in.ObservedGeneration, &out.ObservedGeneration, s); err != nil {
return err


@@ -357,11 +357,16 @@ func (cnc *CloudNodeController) initializeNode(ctx context.Context, node *v1.Nod
providerID, err := cloudprovider.GetInstanceProviderID(ctx, cnc.cloud, types.NodeName(curNode.Name))
if err == nil {
curNode.Spec.ProviderID = providerID
} else if err == cloudprovider.NotImplemented {
// if the cloud provider being used does not support provider IDs,
// we can safely continue since we will attempt to set node
// addresses given the node name in getNodeAddressesByProviderIDOrName
klog.Warningf("cloud provider does not set node provider ID, using node name to discover node %s", node.Name)
} else {
// we should attempt to set providerID on curNode, but
// we can continue if we fail since we will attempt to set
// node addresses given the node name in getNodeAddressesByProviderIDOrName
klog.Errorf("failed to set node provider id: %v", err)
// if the cloud provider being used supports provider IDs, we want
// to propagate the error so that we re-try in the future; if we
// do not, the taint will be removed, and this will not be retried
return err
}
}


@@ -5,6 +5,7 @@ go_library(
srcs = [
"endpointset.go",
"endpointslice_controller.go",
"endpointslice_tracker.go",
"reconciler.go",
"utils.go",
],
@@ -49,6 +50,7 @@ go_test(
name = "go_default_test",
srcs = [
"endpointslice_controller_test.go",
"endpointslice_tracker_test.go",
"reconciler_test.go",
"utils_test.go",
],


@@ -63,7 +63,7 @@ const (
func NewController(podInformer coreinformers.PodInformer,
serviceInformer coreinformers.ServiceInformer,
nodeInformer coreinformers.NodeInformer,
esInformer discoveryinformers.EndpointSliceInformer,
endpointSliceInformer discoveryinformers.EndpointSliceInformer,
maxEndpointsPerSlice int32,
client clientset.Interface,
) *Controller {
@@ -105,8 +105,15 @@ func NewController(podInformer coreinformers.PodInformer,
c.nodeLister = nodeInformer.Lister()
c.nodesSynced = nodeInformer.Informer().HasSynced
c.endpointSliceLister = esInformer.Lister()
c.endpointSlicesSynced = esInformer.Informer().HasSynced
endpointSliceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.onEndpointSliceAdd,
UpdateFunc: c.onEndpointSliceUpdate,
DeleteFunc: c.onEndpointSliceDelete,
})
c.endpointSliceLister = endpointSliceInformer.Lister()
c.endpointSlicesSynced = endpointSliceInformer.Informer().HasSynced
c.endpointSliceTracker = newEndpointSliceTracker()
c.maxEndpointsPerSlice = maxEndpointsPerSlice
@@ -114,6 +121,7 @@ func NewController(podInformer coreinformers.PodInformer,
client: c.client,
nodeLister: c.nodeLister,
maxEndpointsPerSlice: c.maxEndpointsPerSlice,
endpointSliceTracker: c.endpointSliceTracker,
metricsCache: endpointslicemetrics.NewCache(maxEndpointsPerSlice),
}
c.triggerTimeTracker = endpointutil.NewTriggerTimeTracker()
@@ -152,6 +160,10 @@ type Controller struct {
// endpointSlicesSynced returns true if the endpoint slice shared informer has been synced at least once.
// Added as a member to the struct to allow injection for testing.
endpointSlicesSynced cache.InformerSynced
// endpointSliceTracker tracks the list of EndpointSlices and associated
// resource versions expected for each Service. It can help determine if a
// cached EndpointSlice is out of date.
endpointSliceTracker *endpointSliceTracker
// nodeLister is able to list/get nodes and is populated by the
// shared informer passed to NewController
@@ -343,6 +355,57 @@ func (c *Controller) onServiceDelete(obj interface{}) {
c.queue.Add(key)
}
// onEndpointSliceAdd queues a sync for the relevant Service if the
// EndpointSlice resource version does not match the expected version in the
// endpointSliceTracker.
func (c *Controller) onEndpointSliceAdd(obj interface{}) {
endpointSlice := obj.(*discovery.EndpointSlice)
if endpointSlice == nil {
utilruntime.HandleError(fmt.Errorf("Invalid EndpointSlice provided to onEndpointSliceAdd()"))
return
}
if managedByController(endpointSlice) && c.endpointSliceTracker.Stale(endpointSlice) {
c.queueServiceForEndpointSlice(endpointSlice)
}
}
// onEndpointSliceUpdate queues a sync for the relevant Service if
// the EndpointSlice resource version does not match the expected version in the
// endpointSliceTracker or the managed-by value of the EndpointSlice has changed
// from or to this controller.
func (c *Controller) onEndpointSliceUpdate(prevObj, obj interface{}) {
prevEndpointSlice := prevObj.(*discovery.EndpointSlice)
endpointSlice := obj.(*discovery.EndpointSlice)
if endpointSlice == nil || prevEndpointSlice == nil {
utilruntime.HandleError(fmt.Errorf("Invalid EndpointSlice provided to onEndpointSliceUpdate()"))
return
}
if managedByChanged(prevEndpointSlice, endpointSlice) || (managedByController(endpointSlice) && c.endpointSliceTracker.Stale(endpointSlice)) {
c.queueServiceForEndpointSlice(endpointSlice)
}
}
// onEndpointSliceDelete queues a sync for the relevant Service if the
// EndpointSlice resource version does not match the expected version in the
// endpointSliceTracker.
func (c *Controller) onEndpointSliceDelete(obj interface{}) {
endpointSlice := getEndpointSliceFromDeleteAction(obj)
if endpointSlice != nil && managedByController(endpointSlice) && c.endpointSliceTracker.Has(endpointSlice) {
c.queueServiceForEndpointSlice(endpointSlice)
}
}
// queueServiceForEndpointSlice attempts to queue the corresponding Service for
// the provided EndpointSlice.
func (c *Controller) queueServiceForEndpointSlice(endpointSlice *discovery.EndpointSlice) {
key, err := serviceControllerKey(endpointSlice)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Couldn't get key for EndpointSlice %+v: %v", endpointSlice, err))
return
}
c.queue.Add(key)
}
func (c *Controller) addPod(obj interface{}) {
pod := obj.(*v1.Pod)
services, err := c.serviceSelectorCache.GetPodServiceMemberships(c.serviceLister, pod)


@@ -0,0 +1,123 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package endpointslice
import (
"sync"
discovery "k8s.io/api/discovery/v1beta1"
"k8s.io/apimachinery/pkg/types"
)
// endpointSliceResourceVersions tracks expected EndpointSlice resource versions
// by EndpointSlice name.
type endpointSliceResourceVersions map[string]string
// endpointSliceTracker tracks EndpointSlices and their associated resource
// versions to help determine if a change to an EndpointSlice has been processed
// by the EndpointSlice controller.
type endpointSliceTracker struct {
// lock protects resourceVersionsByService.
lock sync.Mutex
// resourceVersionsByService tracks the list of EndpointSlices and
// associated resource versions expected for a given Service.
resourceVersionsByService map[types.NamespacedName]endpointSliceResourceVersions
}
// newEndpointSliceTracker creates and initializes a new endpointSliceTracker.
func newEndpointSliceTracker() *endpointSliceTracker {
return &endpointSliceTracker{
resourceVersionsByService: map[types.NamespacedName]endpointSliceResourceVersions{},
}
}
// Has returns true if the endpointSliceTracker has a resource version for the
// provided EndpointSlice.
func (est *endpointSliceTracker) Has(endpointSlice *discovery.EndpointSlice) bool {
est.lock.Lock()
defer est.lock.Unlock()
rrv := est.relatedResourceVersions(endpointSlice)
_, ok := rrv[endpointSlice.Name]
return ok
}
// Stale returns true if this endpointSliceTracker does not have a resource
// version for the provided EndpointSlice or it does not match the resource
// version of the provided EndpointSlice.
func (est *endpointSliceTracker) Stale(endpointSlice *discovery.EndpointSlice) bool {
est.lock.Lock()
defer est.lock.Unlock()
rrv := est.relatedResourceVersions(endpointSlice)
return rrv[endpointSlice.Name] != endpointSlice.ResourceVersion
}
// Update adds or updates the resource version in this endpointSliceTracker for
// the provided EndpointSlice.
func (est *endpointSliceTracker) Update(endpointSlice *discovery.EndpointSlice) {
est.lock.Lock()
defer est.lock.Unlock()
rrv := est.relatedResourceVersions(endpointSlice)
rrv[endpointSlice.Name] = endpointSlice.ResourceVersion
}
// Delete removes the resource version in this endpointSliceTracker for the
// provided EndpointSlice.
func (est *endpointSliceTracker) Delete(endpointSlice *discovery.EndpointSlice) {
est.lock.Lock()
defer est.lock.Unlock()
rrv := est.relatedResourceVersions(endpointSlice)
delete(rrv, endpointSlice.Name)
}
// relatedResourceVersions returns the set of resource versions tracked for the
// Service corresponding to the provided EndpointSlice. If no resource versions
// are currently tracked for this service, an empty set is initialized.
func (est *endpointSliceTracker) relatedResourceVersions(endpointSlice *discovery.EndpointSlice) endpointSliceResourceVersions {
serviceNN := getServiceNN(endpointSlice)
vers, ok := est.resourceVersionsByService[serviceNN]
if !ok {
vers = endpointSliceResourceVersions{}
est.resourceVersionsByService[serviceNN] = vers
}
return vers
}
// getServiceNN returns a namespaced name for the Service corresponding to the
// provided EndpointSlice.
func getServiceNN(endpointSlice *discovery.EndpointSlice) types.NamespacedName {
serviceName, _ := endpointSlice.Labels[discovery.LabelServiceName]
return types.NamespacedName{Name: serviceName, Namespace: endpointSlice.Namespace}
}
// managedByChanged returns true if one of the provided EndpointSlices is
// managed by the EndpointSlice controller while the other is not.
func managedByChanged(endpointSlice1, endpointSlice2 *discovery.EndpointSlice) bool {
return managedByController(endpointSlice1) != managedByController(endpointSlice2)
}
// managedByController returns true if the controller of the provided
// EndpointSlices is the EndpointSlice controller.
func managedByController(endpointSlice *discovery.EndpointSlice) bool {
managedBy, _ := endpointSlice.Labels[discovery.LabelManagedBy]
return managedBy == controllerName
}
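
An illustrative test (not part of the commit) that spells out the tracker's Has/Stale/Update contract as the controller relies on it, assuming it sits in the same package as the code above:

package endpointslice

import (
	"testing"

	discovery "k8s.io/api/discovery/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestTrackerContract(t *testing.T) {
	tracker := newEndpointSliceTracker()
	slice := &discovery.EndpointSlice{ObjectMeta: metav1.ObjectMeta{
		Name:            "svc-abc123",
		Namespace:       "default",
		ResourceVersion: "100",
		Labels:          map[string]string{discovery.LabelServiceName: "svc"},
	}}

	if tracker.Has(slice) || !tracker.Stale(slice) {
		t.Fatal("an untracked slice is not held and reads as stale")
	}
	tracker.Update(slice) // record the resource version the controller wrote
	if tracker.Stale(slice) {
		t.Fatal("tracked version matches; the cached copy is current")
	}
	slice.ResourceVersion = "99" // informer cache lagging behind our write
	if !tracker.Stale(slice) {
		t.Fatal("a mismatched version must read as stale and trigger a resync")
	}
}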


@@ -40,6 +40,7 @@ type reconciler struct {
client clientset.Interface
nodeLister corelisters.NodeLister
maxEndpointsPerSlice int32
endpointSliceTracker *endpointSliceTracker
metricsCache *metrics.Cache
}
@@ -212,6 +213,7 @@ func (r *reconciler) finalize(
}
errs = append(errs, fmt.Errorf("Error creating EndpointSlice for Service %s/%s: %v", service.Namespace, service.Name, err))
} else {
r.endpointSliceTracker.Update(endpointSlice)
metrics.EndpointSliceChanges.WithLabelValues("create").Inc()
}
}
@@ -222,6 +224,7 @@ func (r *reconciler) finalize(
if err != nil {
errs = append(errs, fmt.Errorf("Error updating %s EndpointSlice for Service %s/%s: %v", endpointSlice.Name, service.Namespace, service.Name, err))
} else {
r.endpointSliceTracker.Update(endpointSlice)
metrics.EndpointSliceChanges.WithLabelValues("update").Inc()
}
}
@@ -231,6 +234,7 @@ func (r *reconciler) finalize(
if err != nil {
errs = append(errs, fmt.Errorf("Error deleting %s EndpointSlice for Service %s/%s: %v", endpointSlice.Name, service.Namespace, service.Name, err))
} else {
r.endpointSliceTracker.Delete(endpointSlice)
metrics.EndpointSliceChanges.WithLabelValues("delete").Inc()
}
}


@@ -26,6 +26,8 @@ import (
apiequality "k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/tools/cache"
"k8s.io/klog"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
api "k8s.io/kubernetes/pkg/apis/core"
@@ -236,6 +238,27 @@ func getSliceToFill(endpointSlices []*discovery.EndpointSlice, numEndpoints, max
return closestSlice
}
// getEndpointSliceFromDeleteAction parses an EndpointSlice from a delete action.
func getEndpointSliceFromDeleteAction(obj interface{}) *discovery.EndpointSlice {
if endpointSlice, ok := obj.(*discovery.EndpointSlice); ok {
// The object is the EndpointSlice itself; handle it the same way as an add.
return endpointSlice
}
// If we reached here it means the EndpointSlice was deleted but its final state is unrecorded.
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
return nil
}
endpointSlice, ok := tombstone.Obj.(*discovery.EndpointSlice)
if !ok {
utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a EndpointSlice: %#v", obj))
return nil
}
return endpointSlice
}
// addTriggerTimeAnnotation adds a triggerTime annotation to an EndpointSlice
func addTriggerTimeAnnotation(endpointSlice *discovery.EndpointSlice, triggerTime time.Time) {
if endpointSlice.Annotations == nil {
@@ -249,6 +272,19 @@ func addTriggerTimeAnnotation(endpointSlice *discovery.EndpointSlice, triggerTim
}
}
// serviceControllerKey returns a controller key for a Service, derived from
// an EndpointSlice.
func serviceControllerKey(endpointSlice *discovery.EndpointSlice) (string, error) {
if endpointSlice == nil {
return "", fmt.Errorf("nil EndpointSlice passed to serviceControllerKey()")
}
serviceName, ok := endpointSlice.Labels[discovery.LabelServiceName]
if !ok || serviceName == "" {
return "", fmt.Errorf("EndpointSlice missing %s label", discovery.LabelServiceName)
}
return fmt.Sprintf("%s/%s", endpointSlice.Namespace, serviceName), nil
}
// endpointSliceEndpointLen helps sort endpoint slices by the number of
// endpoints they contain.
type endpointSliceEndpointLen []*discovery.EndpointSlice

File diff suppressed because one or more lines are too long

View File

@ -118,7 +118,6 @@ type kubenetNetworkPlugin struct {
nonMasqueradeCIDR string
cacheDir string
podCIDRs []*net.IPNet
podGateways []net.IP
}
func NewPlugin(networkPluginDirs []string, cacheDir string) network.NetworkPlugin {
@ -139,7 +138,6 @@ func NewPlugin(networkPluginDirs []string, cacheDir string) network.NetworkPlugi
nonMasqueradeCIDR: "10.0.0.0/8",
cacheDir: cacheDir,
podCIDRs: make([]*net.IPNet, 0),
podGateways: make([]net.IP, 0),
}
}
@ -270,13 +268,11 @@ func (plugin *kubenetNetworkPlugin) Event(name string, details map[string]interf
for idx, currentPodCIDR := range podCIDRs {
_, cidr, err := net.ParseCIDR(currentPodCIDR)
if nil != err {
klog.Warningf("Failed to generate CNI network config with cidr %s at indx:%v: %v", currentPodCIDR, idx, err)
klog.Warningf("Failed to generate CNI network config with cidr %s at index:%v: %v", currentPodCIDR, idx, err)
return
}
// create list of ips and gateways
cidr.IP[len(cidr.IP)-1] += 1 // Set bridge address to first address in IPNet
// create list of ips
plugin.podCIDRs = append(plugin.podCIDRs, cidr)
plugin.podGateways = append(plugin.podGateways, cidr.IP)
}
//setup hairpinMode
@ -336,6 +332,9 @@ func (plugin *kubenetNetworkPlugin) Capabilities() utilsets.Int {
// setup sets up networking through CNI using the given ns/name and sandbox ID.
func (plugin *kubenetNetworkPlugin) setup(namespace string, name string, id kubecontainer.ContainerID, annotations map[string]string) error {
var ipv4, ipv6 net.IP
var podGateways []net.IP
var podCIDRs []net.IPNet
// Disable DAD so we skip the kernel delay on bringing up new interfaces.
if err := plugin.disableContainerDAD(id); err != nil {
klog.V(3).Infof("Failed to disable DAD in container: %v", err)
@ -360,10 +359,14 @@ func (plugin *kubenetNetworkPlugin) setup(namespace string, name string, id kube
// that we get multiple IP addresses in the returned Result structure
if res.IP4 != nil {
ipv4 = res.IP4.IP.IP.To4()
podGateways = append(podGateways, res.IP4.Gateway)
podCIDRs = append(podCIDRs, net.IPNet{IP: ipv4.Mask(res.IP4.IP.Mask), Mask: res.IP4.IP.Mask})
}
if res.IP6 != nil {
ipv6 = res.IP6.IP.IP
podGateways = append(podGateways, res.IP6.Gateway)
podCIDRs = append(podCIDRs, net.IPNet{IP: ipv6.Mask(res.IP6.IP.Mask), Mask: res.IP6.IP.Mask})
}
if ipv4 == nil && ipv6 == nil {
@ -385,7 +388,7 @@ func (plugin *kubenetNetworkPlugin) setup(namespace string, name string, id kube
}
// configure the ebtables rules to eliminate duplicate packets by best effort
plugin.syncEbtablesDedupRules(link.Attrs().HardwareAddr)
plugin.syncEbtablesDedupRules(link.Attrs().HardwareAddr, podCIDRs, podGateways)
}
// add the ip to tracked ips
@ -761,7 +764,7 @@ func (plugin *kubenetNetworkPlugin) shaper() bandwidth.Shaper {
}
//TODO: make this into a goroutine and rectify the dedup rules periodically
func (plugin *kubenetNetworkPlugin) syncEbtablesDedupRules(macAddr net.HardwareAddr) {
func (plugin *kubenetNetworkPlugin) syncEbtablesDedupRules(macAddr net.HardwareAddr, podCIDRs []net.IPNet, podGateways []net.IP) {
if plugin.ebtables == nil {
plugin.ebtables = utilebtables.New(plugin.execer)
klog.V(3).Infof("Flushing dedup chain")
@ -790,8 +793,8 @@ func (plugin *kubenetNetworkPlugin) syncEbtablesDedupRules(macAddr net.HardwareA
}
// per gateway rule
for idx, gw := range plugin.podGateways {
klog.V(3).Infof("Filtering packets with ebtables on mac address: %v, gateway: %v, pod CIDR: %v", macAddr.String(), gw.String(), plugin.podCIDRs[idx].String())
for idx, gw := range podGateways {
klog.V(3).Infof("Filtering packets with ebtables on mac address: %v, gateway: %v, pod CIDR: %v", macAddr.String(), gw.String(), podCIDRs[idx].String())
bIsV6 := netutils.IsIPv6(gw)
IPFamily := "IPv4"
@ -807,9 +810,9 @@ func (plugin *kubenetNetworkPlugin) syncEbtablesDedupRules(macAddr net.HardwareA
return
}
_, err = plugin.ebtables.EnsureRule(utilebtables.Append, utilebtables.TableFilter, dedupChain, append(commonArgs, ipSrc, plugin.podCIDRs[idx].String(), "-j", "DROP")...)
_, err = plugin.ebtables.EnsureRule(utilebtables.Append, utilebtables.TableFilter, dedupChain, append(commonArgs, ipSrc, podCIDRs[idx].String(), "-j", "DROP")...)
if err != nil {
klog.Errorf("Failed to ensure packets from podCidr[%v] but has mac address of cbr0 to get dropped. err:%v", plugin.podCIDRs[idx].String(), err)
klog.Errorf("Failed to ensure packets from podCidr[%v] but has mac address of cbr0 to get dropped. err:%v", podCIDRs[idx].String(), err)
return
}
}
@ -861,10 +864,9 @@ func (plugin *kubenetNetworkPlugin) getRangesConfig() string {
createRange := func(thisNet *net.IPNet) string {
template := `
[{
"subnet": "%s",
"gateway": "%s"
"subnet": "%s"
}]`
return fmt.Sprintf(template, thisNet.String(), thisNet.IP.String())
return fmt.Sprintf(template, thisNet.String())
}
ranges := make([]string, len(plugin.podCIDRs))
@ -872,7 +874,7 @@ func (plugin *kubenetNetworkPlugin) getRangesConfig() string {
ranges[idx] = createRange(thisCIDR)
}
//[{range}], [{range}]
// each range is a subnet and a gateway
// each range contains a subnet; the gateway will be fetched from the CNI result
return strings.Join(ranges[:], ",")
}
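
With the gateway dropped from the template, the generated ranges config carries only subnets. A runnable sketch reproducing getRangesConfig's output for a dual-stack node (the CIDR values are made up):

package main

import (
	"fmt"
	"net"
	"strings"
)

// buildRangesConfig mirrors the getRangesConfig logic above: each range
// now carries only a subnet; the gateway comes from the CNI ADD result
// at pod setup time instead.
func buildRangesConfig(podCIDRs []*net.IPNet) string {
	createRange := func(thisNet *net.IPNet) string {
		template := `
[{
"subnet": "%s"
}]`
		return fmt.Sprintf(template, thisNet.String())
	}
	ranges := make([]string, len(podCIDRs))
	for idx, thisCIDR := range podCIDRs {
		ranges[idx] = createRange(thisCIDR)
	}
	return strings.Join(ranges, ",")
}

func main() {
	_, v4, _ := net.ParseCIDR("10.244.1.0/24")
	_, v6, _ := net.ParseCIDR("fd00:10:244::/64")
	fmt.Println(buildRangesConfig([]*net.IPNet{v4, v6}))
}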

View File

@ -88,6 +88,7 @@ type Server struct {
auth AuthInterface
host HostInterface
restfulCont containerInterface
metricsBuckets map[string]bool
resourceAnalyzer stats.ResourceAnalyzer
redirectContainerStreaming bool
}
@ -224,6 +225,7 @@ func NewServer(
resourceAnalyzer: resourceAnalyzer,
auth: auth,
restfulCont: &filteringContainer{Container: restful.NewContainer()},
metricsBuckets: make(map[string]bool),
redirectContainerStreaming: redirectContainerStreaming,
}
if auth != nil {
@ -279,14 +281,32 @@ func (s *Server) InstallAuthFilter() {
})
}
// addMetricsBucketMatcher registers a path root as a valid metrics reporting
// bucket. Be aware this is not thread safe: it should only be called while
// handlers are installed, never dynamically.
func (s *Server) addMetricsBucketMatcher(bucket string) {
s.metricsBuckets[bucket] = true
}
// getMetricBucket finds the appropriate metrics reporting bucket for the given path
func (s *Server) getMetricBucket(path string) string {
root := getURLRootPath(path)
if s.metricsBuckets[root] {
return root
}
return "Invalid path"
}
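
Together these helpers whitelist metric label values: only pre-registered path roots are reported, and everything else collapses to "Invalid path" so arbitrary request URLs cannot blow up metric cardinality. A standalone sketch of the matching logic (getURLRootPath's full body is reconstructed here from its documented behavior, so treat it as an approximation):

package main

import (
	"fmt"
	"strings"
)

var metricsBuckets = map[string]bool{}

func addMetricsBucketMatcher(bucket string) { metricsBuckets[bucket] = true }

// getURLRootPath keeps two segments for "/metrics/xxx" paths and only the
// first segment for everything else.
func getURLRootPath(path string) string {
	parts := strings.SplitN(strings.TrimPrefix(path, "/"), "/", 3)
	if len(parts) == 0 {
		return path
	}
	if parts[0] == "metrics" && len(parts) > 1 {
		return parts[0] + "/" + parts[1]
	}
	return parts[0]
}

func getMetricBucket(path string) string {
	if root := getURLRootPath(path); metricsBuckets[root] {
		return root
	}
	return "Invalid path"
}

func main() {
	addMetricsBucketMatcher("pods")
	addMetricsBucketMatcher("metrics/cadvisor")
	fmt.Println(getMetricBucket("/pods"))             // pods
	fmt.Println(getMetricBucket("/metrics/cadvisor")) // metrics/cadvisor
	fmt.Println(getMetricBucket("/exec/ns/pod/ctr"))  // Invalid path (not registered here)
}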
// InstallDefaultHandlers registers the default set of supported HTTP request
// patterns with the restful Container.
func (s *Server) InstallDefaultHandlers(enableCAdvisorJSONEndpoints bool) {
s.addMetricsBucketMatcher("healthz")
healthz.InstallHandler(s.restfulCont,
healthz.PingHealthz,
healthz.LogHealthz,
healthz.NamedCheck("syncloop", s.syncLoopHealthCheck),
)
s.addMetricsBucketMatcher("pods")
ws := new(restful.WebService)
ws.
Path("/pods").
@ -296,7 +316,14 @@ func (s *Server) InstallDefaultHandlers(enableCAdvisorJSONEndpoints bool) {
Operation("getPods"))
s.restfulCont.Add(ws)
s.addMetricsBucketMatcher("stats")
s.restfulCont.Add(stats.CreateHandlers(statsPath, s.host, s.resourceAnalyzer, enableCAdvisorJSONEndpoints))
s.addMetricsBucketMatcher("metrics")
s.addMetricsBucketMatcher("metrics/cadvisor")
s.addMetricsBucketMatcher("metrics/probes")
s.addMetricsBucketMatcher("metrics/resource/v1alpha1")
s.addMetricsBucketMatcher("metrics/resource")
//lint:ignore SA1019 https://github.com/kubernetes/enhancements/issues/1206
s.restfulCont.Handle(metricsPath, legacyregistry.Handler())
@ -319,6 +346,7 @@ func (s *Server) InstallDefaultHandlers(enableCAdvisorJSONEndpoints bool) {
compbasemetrics.HandlerFor(r, compbasemetrics.HandlerOpts{ErrorHandling: compbasemetrics.ContinueOnError}),
)
s.addMetricsBucketMatcher("metrics/resource/v1alpha1")
v1alpha1ResourceRegistry := compbasemetrics.NewKubeRegistry()
v1alpha1ResourceRegistry.CustomMustRegister(stats.NewPrometheusResourceMetricCollector(s.resourceAnalyzer, v1alpha1.Config()))
s.restfulCont.Handle(path.Join(resourceMetricsPathPrefix, v1alpha1.Version),
@ -327,6 +355,7 @@ func (s *Server) InstallDefaultHandlers(enableCAdvisorJSONEndpoints bool) {
// prober metrics are exposed under a different endpoint
s.addMetricsBucketMatcher("metrics/probes")
p := compbasemetrics.NewKubeRegistry()
compbasemetrics.RegisterProcessStartTime(p.RawRegister)
p.MustRegister(prober.ProberResults)
@ -334,6 +363,7 @@ func (s *Server) InstallDefaultHandlers(enableCAdvisorJSONEndpoints bool) {
compbasemetrics.HandlerFor(p, compbasemetrics.HandlerOpts{ErrorHandling: compbasemetrics.ContinueOnError}),
)
s.addMetricsBucketMatcher("spec")
if enableCAdvisorJSONEndpoints {
ws := new(restful.WebService)
ws.
@ -353,6 +383,7 @@ const pprofBasePath = "/debug/pprof/"
func (s *Server) InstallDebuggingHandlers(criHandler http.Handler) {
klog.Infof("Adding debug handlers to kubelet server.")
s.addMetricsBucketMatcher("run")
ws := new(restful.WebService)
ws.
Path("/run")
@ -364,6 +395,7 @@ func (s *Server) InstallDebuggingHandlers(criHandler http.Handler) {
Operation("getRun"))
s.restfulCont.Add(ws)
s.addMetricsBucketMatcher("exec")
ws = new(restful.WebService)
ws.
Path("/exec")
@ -381,6 +413,7 @@ func (s *Server) InstallDebuggingHandlers(criHandler http.Handler) {
Operation("getExec"))
s.restfulCont.Add(ws)
s.addMetricsBucketMatcher("attach")
ws = new(restful.WebService)
ws.
Path("/attach")
@ -398,6 +431,7 @@ func (s *Server) InstallDebuggingHandlers(criHandler http.Handler) {
Operation("getAttach"))
s.restfulCont.Add(ws)
s.addMetricsBucketMatcher("portForward")
ws = new(restful.WebService)
ws.
Path("/portForward")
@ -415,6 +449,7 @@ func (s *Server) InstallDebuggingHandlers(criHandler http.Handler) {
Operation("getPortForward"))
s.restfulCont.Add(ws)
s.addMetricsBucketMatcher("logs")
ws = new(restful.WebService)
ws.
Path(logsPath)
@ -427,6 +462,7 @@ func (s *Server) InstallDebuggingHandlers(criHandler http.Handler) {
Param(ws.PathParameter("logpath", "path to the log").DataType("string")))
s.restfulCont.Add(ws)
s.addMetricsBucketMatcher("containerLogs")
ws = new(restful.WebService)
ws.
Path("/containerLogs")
@ -435,8 +471,10 @@ func (s *Server) InstallDebuggingHandlers(criHandler http.Handler) {
Operation("getContainerLogs"))
s.restfulCont.Add(ws)
s.addMetricsBucketMatcher("configz")
configz.InstallHandler(s.restfulCont)
s.addMetricsBucketMatcher("debug")
handlePprofEndpoint := func(req *restful.Request, resp *restful.Response) {
name := strings.TrimPrefix(req.Request.URL.Path, pprofBasePath)
switch name {
@ -452,7 +490,6 @@ func (s *Server) InstallDebuggingHandlers(criHandler http.Handler) {
pprof.Index(resp, req.Request)
}
}
// Setup pprof handlers.
ws = new(restful.WebService).Path(pprofBasePath)
ws.Route(ws.GET("/{subpath:*}").To(func(req *restful.Request, resp *restful.Response) {
@ -465,6 +502,7 @@ func (s *Server) InstallDebuggingHandlers(criHandler http.Handler) {
s.restfulCont.Handle("/debug/flags/v", routes.StringFlagPutHandler(logs.GlogSetter))
// The /runningpods endpoint is used for testing only.
s.addMetricsBucketMatcher("runningpods")
ws = new(restful.WebService)
ws.
Path("/runningpods/").
@ -474,6 +512,7 @@ func (s *Server) InstallDebuggingHandlers(criHandler http.Handler) {
Operation("getRunningPods"))
s.restfulCont.Add(ws)
s.addMetricsBucketMatcher("cri")
if criHandler != nil {
s.restfulCont.Handle("/cri/", criHandler)
}
@ -485,6 +524,14 @@ func (s *Server) InstallDebuggingDisabledHandlers() {
http.Error(w, "Debug endpoints are disabled.", http.StatusMethodNotAllowed)
})
s.addMetricsBucketMatcher("run")
s.addMetricsBucketMatcher("exec")
s.addMetricsBucketMatcher("attach")
s.addMetricsBucketMatcher("portForward")
s.addMetricsBucketMatcher("containerLogs")
s.addMetricsBucketMatcher("runningpods")
s.addMetricsBucketMatcher("pprof")
s.addMetricsBucketMatcher("logs")
paths := []string{
"/run/", "/exec/", "/attach/", "/portForward/", "/containerLogs/",
"/runningpods/", pprofBasePath, logsPath}
@ -801,10 +848,10 @@ func (s *Server) getPortForward(request *restful.Request, response *restful.Resp
proxyStream(response.ResponseWriter, request.Request, url)
}
// trimURLPath trims a URL path.
// getURLRootPath returns the root portion of a URL path.
// For paths in the format of "/metrics/xxx", "metrics/xxx" is returned;
// For all other paths, the first part of the path is returned.
func trimURLPath(path string) string {
func getURLRootPath(path string) string {
parts := strings.SplitN(strings.TrimPrefix(path, "/"), "/", 3)
if len(parts) == 0 {
return path
@ -852,7 +899,7 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {
serverType = "readwrite"
}
method, path := req.Method, trimURLPath(req.URL.Path)
method, path := req.Method, s.getMetricBucket(req.URL.Path)
longRunning := strconv.FormatBool(isLongRunningRequest(path))

View File

@ -21,12 +21,14 @@ import (
"time"
ktypes "k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/scheduler/util"
)
// PodBackoffMap is a structure that stores backoff related information for pods
type PodBackoffMap struct {
// lock for performing actions on this PodBackoffMap
lock sync.RWMutex
lock sync.RWMutex
clock util.Clock
// initial backoff duration
initialDuration time.Duration
// maximal backoff duration
@ -38,8 +40,9 @@ type PodBackoffMap struct {
}
// NewPodBackoffMap creates a PodBackoffMap with initial duration and max duration.
func NewPodBackoffMap(initialDuration, maxDuration time.Duration) *PodBackoffMap {
func NewPodBackoffMap(initialDuration, maxDuration time.Duration, clock util.Clock) *PodBackoffMap {
return &PodBackoffMap{
clock: clock,
initialDuration: initialDuration,
maxDuration: maxDuration,
podAttempts: make(map[ktypes.NamespacedName]int),
@ -91,12 +94,16 @@ func (pbm *PodBackoffMap) ClearPodBackoff(nsPod ktypes.NamespacedName) {
// CleanupPodsCompletesBackingoff executes garbage collection on the pod backoff,
// i.e., it will remove a pod from the PodBackoffMap if
// lastUpdateTime + maxBackoffDuration is before the current timestamp
// lastUpdateTime + maxDuration >> timestamp
// We should wait longer than the maxDuration so that the pod gets a chance to
// (1) move to the active queue and (2) get a schedule attempt.
func (pbm *PodBackoffMap) CleanupPodsCompletesBackingoff() {
pbm.lock.Lock()
defer pbm.lock.Unlock()
for pod, value := range pbm.podLastUpdateTime {
if value.Add(pbm.maxDuration).Before(time.Now()) {
// Here we assume that maxDuration should be enough for a pod to move up the
// active queue and get a schedule attempt.
if value.Add(2 * pbm.maxDuration).Before(pbm.clock.Now()) {
pbm.clearPodBackoff(pod)
}
}
@ -106,7 +113,7 @@ func (pbm *PodBackoffMap) CleanupPodsCompletesBackingoff() {
// and increases its numberOfAttempts by 1
func (pbm *PodBackoffMap) BackoffPod(nsPod ktypes.NamespacedName) {
pbm.lock.Lock()
pbm.podLastUpdateTime[nsPod] = time.Now()
pbm.podLastUpdateTime[nsPod] = pbm.clock.Now()
pbm.podAttempts[nsPod]++
pbm.lock.Unlock()
}
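
Injecting the clock makes the 2*maxDuration cleanup testable without real sleeps. A self-contained sketch of why (the Clock interface and backoffMap here are simplified stand-ins for util.Clock and PodBackoffMap):

package main

import (
	"fmt"
	"time"
)

type Clock interface{ Now() time.Time }

type fakeClock struct{ t time.Time }

func (f *fakeClock) Now() time.Time { return f.t }

type backoffMap struct {
	clock       Clock
	maxDuration time.Duration
	lastUpdate  map[string]time.Time
}

func (m *backoffMap) backoff(pod string) { m.lastUpdate[pod] = m.clock.Now() }

func (m *backoffMap) cleanup() {
	for pod, t := range m.lastUpdate {
		// Wait 2*maxDuration so the pod gets a chance to be retried first.
		if t.Add(2 * m.maxDuration).Before(m.clock.Now()) {
			delete(m.lastUpdate, pod)
		}
	}
}

func main() {
	clk := &fakeClock{t: time.Now()}
	m := &backoffMap{clock: clk, maxDuration: 10 * time.Second, lastUpdate: map[string]time.Time{}}
	m.backoff("default/nginx")
	clk.t = clk.t.Add(21 * time.Second) // fast-forward past 2*maxDuration
	m.cleanup()
	fmt.Println(len(m.lastUpdate)) // 0: entry garbage-collected, no sleeping needed
}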

View File

@ -233,7 +233,7 @@ func NewPriorityQueue(
pq := &PriorityQueue{
clock: options.clock,
stop: stop,
podBackoff: NewPodBackoffMap(options.podInitialBackoffDuration, options.podMaxBackoffDuration),
podBackoff: NewPodBackoffMap(options.podInitialBackoffDuration, options.podMaxBackoffDuration, options.clock),
activeQ: heap.NewWithRecorder(podInfoKeyFunc, comp, metrics.NewActivePodsRecorder()),
unschedulableQ: newUnschedulablePodsMap(metrics.NewUnschedulablePodsRecorder()),
nominatedPods: newNominatedPodMap(),

View File

@ -58,10 +58,13 @@ var (
StabilityLevel: metrics.ALPHA,
}, []string{"result"})
// PodScheduleSuccesses counts how many pods were scheduled.
// This metric will be initialized again in Register() to ensure the metric is not a no-op metric.
PodScheduleSuccesses = scheduleAttempts.With(metrics.Labels{"result": "scheduled"})
// PodScheduleFailures counts how many pods could not be scheduled.
// This metric will be initialized again in Register() to ensure the metric is not a no-op metric.
PodScheduleFailures = scheduleAttempts.With(metrics.Labels{"result": "unschedulable"})
// PodScheduleErrors counts how many pods could not be scheduled due to a scheduler error.
// This metric will be initialized again in Register() to ensure the metric is not a no-op metric.
PodScheduleErrors = scheduleAttempts.With(metrics.Labels{"result": "error"})
SchedulingLatency = metrics.NewSummaryVec(
&metrics.SummaryOpts{
@ -341,6 +344,9 @@ func Register() {
legacyregistry.MustRegister(metric)
}
volumeschedulingmetrics.RegisterVolumeSchedulingMetrics()
PodScheduleSuccesses = scheduleAttempts.With(metrics.Labels{"result": "scheduled"})
PodScheduleFailures = scheduleAttempts.With(metrics.Labels{"result": "unschedulable"})
PodScheduleErrors = scheduleAttempts.With(metrics.Labels{"result": "error"})
})
}
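
Register() re-derives the With() children after registration because component-base metrics are lazily instantiated: a child taken from a not-yet-registered vec stays a no-op forever. A hedged sketch of the pitfall, reusing the same calls shown above (the metric name is abbreviated and the surrounding scheduler code is omitted):

package main

import (
	"k8s.io/component-base/metrics"
	"k8s.io/component-base/metrics/legacyregistry"
)

var (
	scheduleAttempts = metrics.NewCounterVec(
		&metrics.CounterOpts{
			Name:           "schedule_attempts_total",
			Help:           "Number of attempts to schedule pods.",
			StabilityLevel: metrics.ALPHA,
		}, []string{"result"})

	// Taken before MustRegister: backed by a no-op vec at this point.
	podScheduleSuccesses = scheduleAttempts.With(metrics.Labels{"result": "scheduled"})
)

func register() {
	legacyregistry.MustRegister(scheduleAttempts)
	// Re-derive the child now that the vec is materialized, as Register() does.
	podScheduleSuccesses = scheduleAttempts.With(metrics.Labels{"result": "scheduled"})
}

func main() {
	register()
	podScheduleSuccesses.Inc() // now actually increments the counter
}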

View File

@ -335,6 +335,8 @@ func New(client clientset.Interface,
configProducerArgs: &frameworkplugins.ConfigProducerArgs{},
}
metrics.Register()
var sched *Scheduler
source := options.schedulerAlgorithmSource
switch {
@ -366,7 +368,6 @@ func New(client clientset.Interface,
default:
return nil, fmt.Errorf("unsupported algorithm source: %v", source)
}
metrics.Register()
// Additional tweaks to the config produced by the configurator.
sched.Recorder = recorder
sched.DisablePreemption = options.disablePreemption

View File

@ -48,23 +48,16 @@ func GetPodFullName(pod *v1.Pod) string {
return pod.Name + "_" + pod.Namespace
}
// GetPodStartTime returns start time of the given pod.
// GetPodStartTime returns the start time of the given pod, or the current
// timestamp if it hasn't started yet.
func GetPodStartTime(pod *v1.Pod) *metav1.Time {
if pod.Status.StartTime != nil {
return pod.Status.StartTime
}
// Should not reach here as the start time of a running time should not be nil
// Return current timestamp as the default value.
// This will not affect the calculation of earliest timestamp of all the pods on one node,
// because current timestamp is always after the StartTime of any pod in good state.
klog.Errorf("pod.Status.StartTime is nil for pod %s. Should not reach here.", pod.Name)
// Assumed pods and bound pods that haven't started don't have a StartTime yet.
return &metav1.Time{Time: time.Now()}
}
// lessFunc is a function that receives two items and returns true if the first
// item should be placed before the second one when the list is sorted.
type lessFunc = func(item1, item2 interface{}) bool
// GetEarliestPodStartTime returns the earliest start time of all pods that
// have the highest priority among all victims.
func GetEarliestPodStartTime(victims *extenderv1.Victims) *metav1.Time {

View File

@ -3465,6 +3465,18 @@ func getPortSets(annotation string) (ports *portSets) {
return
}
// getSGListFromAnnotation extracts the security group list from the annotation value
func getSGListFromAnnotation(annotatedSG string) []string {
sgList := []string{}
for _, extraSG := range strings.Split(annotatedSG, ",") {
extraSG = strings.TrimSpace(extraSG)
if len(extraSG) > 0 {
sgList = append(sgList, extraSG)
}
}
return sgList
}
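
For illustration, a runnable usage of the helper with a representative annotation value (the security group IDs are invented):

package main

import (
	"fmt"
	"strings"
)

// getSGListFromAnnotation as above: splits on commas, trims whitespace,
// and drops empty entries.
func getSGListFromAnnotation(annotatedSG string) []string {
	sgList := []string{}
	for _, extraSG := range strings.Split(annotatedSG, ",") {
		extraSG = strings.TrimSpace(extraSG)
		if len(extraSG) > 0 {
			sgList = append(sgList, extraSG)
		}
	}
	return sgList
}

func main() {
	// e.g. service.beta.kubernetes.io/aws-load-balancer-security-groups: "sg-aaa, sg-bbb,"
	fmt.Println(getSGListFromAnnotation("sg-aaa, sg-bbb,")) // [sg-aaa sg-bbb]
}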
// buildELBSecurityGroupList returns the list of SecurityGroups which should be
// attached to the ELB created by a service. The list always consists of at least
// one member: an SG created for this service, or an SG from the Global config.
@ -3475,39 +3487,30 @@ func (c *Cloud) buildELBSecurityGroupList(serviceName types.NamespacedName, load
var err error
var securityGroupID string
if c.cfg.Global.ElbSecurityGroup != "" {
securityGroupID = c.cfg.Global.ElbSecurityGroup
} else {
// Create a security group for the load balancer
sgName := "k8s-elb-" + loadBalancerName
sgDescription := fmt.Sprintf("Security group for Kubernetes ELB %s (%v)", loadBalancerName, serviceName)
securityGroupID, err = c.ensureSecurityGroup(sgName, sgDescription, getLoadBalancerAdditionalTags(annotations))
if err != nil {
klog.Errorf("Error creating load balancer security group: %q", err)
return nil, err
}
}
sgList := getSGListFromAnnotation(annotations[ServiceAnnotationLoadBalancerSecurityGroups])
sgList := []string{}
for _, extraSG := range strings.Split(annotations[ServiceAnnotationLoadBalancerSecurityGroups], ",") {
extraSG = strings.TrimSpace(extraSG)
if len(extraSG) > 0 {
sgList = append(sgList, extraSG)
}
}
// The code below makes sure that when Security Groups are specified with the ServiceAnnotationLoadBalancerSecurityGroups
// annotation we don't create a new default Security Group
// If no Security Groups have been specified with the ServiceAnnotationLoadBalancerSecurityGroups annotation, we add the default one.
if len(sgList) == 0 {
if c.cfg.Global.ElbSecurityGroup != "" {
securityGroupID = c.cfg.Global.ElbSecurityGroup
} else {
// Create a security group for the load balancer
sgName := "k8s-elb-" + loadBalancerName
sgDescription := fmt.Sprintf("Security group for Kubernetes ELB %s (%v)", loadBalancerName, serviceName)
securityGroupID, err = c.ensureSecurityGroup(sgName, sgDescription, getLoadBalancerAdditionalTags(annotations))
if err != nil {
klog.Errorf("Error creating load balancer security group: %q", err)
return nil, err
}
}
sgList = append(sgList, securityGroupID)
}
for _, extraSG := range strings.Split(annotations[ServiceAnnotationLoadBalancerExtraSecurityGroups], ",") {
extraSG = strings.TrimSpace(extraSG)
if len(extraSG) > 0 {
sgList = append(sgList, extraSG)
}
}
extraSGList := getSGListFromAnnotation(annotations[ServiceAnnotationLoadBalancerExtraSecurityGroups])
sgList = append(sgList, extraSGList...)
return sgList, nil
}
@ -4315,6 +4318,14 @@ func (c *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName strin
// Collect the security groups to delete
securityGroupIDs := map[string]struct{}{}
annotatedSgSet := map[string]bool{}
annotatedSgsList := getSGListFromAnnotation(service.Annotations[ServiceAnnotationLoadBalancerSecurityGroups])
annotatedExtraSgsList := getSGListFromAnnotation(service.Annotations[ServiceAnnotationLoadBalancerExtraSecurityGroups])
annotatedSgsList = append(annotatedSgsList, annotatedExtraSgsList...)
for _, sg := range annotatedSgsList {
annotatedSgSet[sg] = true
}
for _, sg := range response {
sgID := aws.StringValue(sg.GroupId)
@ -4333,6 +4344,12 @@ func (c *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName strin
continue
}
// This is extra protection against deleting a non-provisioned Security Group that is annotated with `service.beta.kubernetes.io/aws-load-balancer-security-groups`.
if _, ok := annotatedSgSet[sgID]; ok {
klog.Warningf("Ignoring security group with annotation `service.beta.kubernetes.io/aws-load-balancer-security-groups` or service.beta.kubernetes.io/aws-load-balancer-extra-security-groups in %s", service.Name)
continue
}
securityGroupIDs[sgID] = struct{}{}
}

View File

@ -90,7 +90,7 @@ func GetServicePrincipalToken(config *AzureAuthConfig, env *azure.Environment) (
env.ServiceManagementEndpoint)
}
oauthConfig, err := adal.NewOAuthConfigWithAPIVersion(env.ActiveDirectoryEndpoint, tenantID, nil)
oauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, tenantID)
if err != nil {
return nil, fmt.Errorf("creating the OAuth config: %v", err)
}

View File

@ -38,6 +38,9 @@ const (
// active/expired. If entry doesn't exist in cache, then data is fetched
// using getter, saved in cache and returned
cacheReadTypeUnsafe
// cacheReadTypeForceRefresh force refreshes the cache even if the cache entry
// is not expired
cacheReadTypeForceRefresh
)
// getFunc defines a getter function for timedCache.
@ -120,20 +123,20 @@ func (t *timedCache) Get(key string, crt cacheReadType) (interface{}, error) {
entry.lock.Lock()
defer entry.lock.Unlock()
// entry exists
if entry.data != nil {
// entry exists and the cache is not being force refreshed
if entry.data != nil && crt != cacheReadTypeForceRefresh {
// allow unsafe read, so return data even if expired
if crt == cacheReadTypeUnsafe {
return entry.data, nil
}
// if cached data is not expired, return cached data
if time.Since(entry.createdOn) < t.ttl {
if crt == cacheReadTypeDefault && time.Since(entry.createdOn) < t.ttl {
return entry.data, nil
}
}
// Data is not cached yet or cache data is expired, cache it by getter.
// entry is locked before getting to ensure concurrent gets don't result in
// multiple ARM calls.
// Data is not cached yet, the cached data has expired, or a force refresh was
// requested: fetch it via the getter. The entry is locked before getting to
// ensure concurrent gets don't result in multiple ARM calls.
data, err := t.getter(key)
if err != nil {
return nil, err
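
To see how the new read type interacts with the existing ones, here is a self-contained sketch of a timed cache in the same spirit. It is not the upstream timedCache (which locks per entry and stores entries in a client-go store), just the control flow:

package main

import (
	"fmt"
	"sync"
	"time"
)

type readType int

const (
	readDefault readType = iota
	readUnsafe
	readForceRefresh
)

type entry struct {
	data      interface{}
	createdOn time.Time
}

type timedCache struct {
	mu     sync.Mutex
	ttl    time.Duration
	store  map[string]*entry
	getter func(key string) (interface{}, error)
}

func (t *timedCache) Get(key string, crt readType) (interface{}, error) {
	t.mu.Lock()
	defer t.mu.Unlock()
	e, ok := t.store[key]
	if ok && e.data != nil && crt != readForceRefresh {
		if crt == readUnsafe {
			return e.data, nil // stale reads allowed
		}
		if crt == readDefault && time.Since(e.createdOn) < t.ttl {
			return e.data, nil
		}
	}
	data, err := t.getter(key) // missing, expired, or force-refreshed
	if err != nil {
		return nil, err
	}
	t.store[key] = &entry{data: data, createdOn: time.Now()}
	return data, nil
}

func main() {
	calls := 0
	c := &timedCache{ttl: time.Minute, store: map[string]*entry{},
		getter: func(key string) (interface{}, error) { calls++; return calls, nil }}
	v, _ := c.Get("vmss", readDefault) // miss: calls getter
	fmt.Println(v)                     // 1
	v, _ = c.Get("vmss", readDefault) // fresh hit: no getter call
	fmt.Println(v)                    // 1
	v, _ = c.Get("vmss", readForceRefresh) // bypasses the fresh entry
	fmt.Println(v)                         // 2
}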

View File

@ -137,11 +137,11 @@ func (az *Cloud) GetLoadBalancer(ctx context.Context, clusterName string, servic
return status, true, nil
}
func getPublicIPDomainNameLabel(service *v1.Service) string {
func getPublicIPDomainNameLabel(service *v1.Service) (string, bool) {
if labelName, found := service.Annotations[ServiceAnnotationDNSLabelName]; found {
return labelName
return labelName, found
}
return ""
return "", false
}
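
The switch to a (string, bool) return is the standard comma-ok pattern: it lets ensurePublicIPExists distinguish an absent annotation (leave the existing DNS settings untouched) from one that is present but empty (clear them). A tiny sketch, assuming the annotation key defined upstream as ServiceAnnotationDNSLabelName:

package main

import "fmt"

func domainNameLabel(annotations map[string]string) (string, bool) {
	label, found := annotations["service.beta.kubernetes.io/azure-dns-label-name"]
	return label, found
}

func main() {
	if _, found := domainNameLabel(map[string]string{}); !found {
		fmt.Println("no annotation: keep the PIP's current DNS settings")
	}
	if label, found := domainNameLabel(map[string]string{
		"service.beta.kubernetes.io/azure-dns-label-name": "",
	}); found && label == "" {
		fmt.Println("empty annotation: clear the DNS settings")
	}
}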
// EnsureLoadBalancer creates a new load balancer 'name', or updates the existing one. Returns the status of the balancer
@ -513,7 +513,7 @@ func (az *Cloud) findServiceIPAddress(ctx context.Context, clusterName string, s
return lbStatus.Ingress[0].IP, nil
}
func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domainNameLabel, clusterName string, shouldPIPExisted bool) (*network.PublicIPAddress, error) {
func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domainNameLabel, clusterName string, shouldPIPExisted, foundDNSLabelAnnotation bool) (*network.PublicIPAddress, error) {
pipResourceGroup := az.getPublicIPAddressResourceGroup(service)
pip, existsPip, err := az.getPublicIPAddress(pipResourceGroup, pipName)
if err != nil {
@ -553,11 +553,13 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai
}
klog.V(2).Infof("ensurePublicIPExists for service(%s): pip(%s) - creating", serviceName, *pip.Name)
}
if len(domainNameLabel) == 0 {
pip.PublicIPAddressPropertiesFormat.DNSSettings = nil
} else {
pip.PublicIPAddressPropertiesFormat.DNSSettings = &network.PublicIPAddressDNSSettings{
DomainNameLabel: &domainNameLabel,
if foundDNSLabelAnnotation {
if len(domainNameLabel) == 0 {
pip.PublicIPAddressPropertiesFormat.DNSSettings = nil
} else {
pip.PublicIPAddressPropertiesFormat.DNSSettings = &network.PublicIPAddressDNSSettings{
DomainNameLabel: &domainNameLabel,
}
}
}
@ -812,8 +814,8 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
if err != nil {
return nil, err
}
domainNameLabel := getPublicIPDomainNameLabel(service)
pip, err := az.ensurePublicIPExists(service, pipName, domainNameLabel, clusterName, shouldPIPExisted)
domainNameLabel, found := getPublicIPDomainNameLabel(service)
pip, err := az.ensurePublicIPExists(service, pipName, domainNameLabel, clusterName, shouldPIPExisted, found)
if err != nil {
return nil, err
}
@ -1516,8 +1518,8 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, lbNa
if !isInternal && wantLb {
// Confirm desired public ip resource exists
var pip *network.PublicIPAddress
domainNameLabel := getPublicIPDomainNameLabel(service)
if pip, err = az.ensurePublicIPExists(service, desiredPipName, domainNameLabel, clusterName, shouldPIPExisted); err != nil {
domainNameLabel, found := getPublicIPDomainNameLabel(service)
if pip, err = az.ensurePublicIPExists(service, desiredPipName, domainNameLabel, clusterName, shouldPIPExisted, found); err != nil {
return nil, err
}
return pip, nil

View File

@ -115,7 +115,7 @@ func (ss *scaleSet) getVMSS(vmssName string, crt cacheReadType) (*compute.Virtua
return vmss, nil
}
klog.V(3).Infof("Couldn't find VMSS with name %s, refreshing the cache", vmssName)
klog.V(2).Infof("Couldn't find VMSS with name %s, refreshing the cache", vmssName)
ss.vmssCache.Delete(vmssKey)
vmss, err = getter(vmssName)
if err != nil {
@ -131,19 +131,21 @@ func (ss *scaleSet) getVMSS(vmssName string, crt cacheReadType) (*compute.Virtua
// getVmssVM gets virtualMachineScaleSetVM by nodeName from cache.
// It returns cloudprovider.InstanceNotFound if node does not belong to any scale sets.
func (ss *scaleSet) getVmssVM(nodeName string, crt cacheReadType) (string, string, *compute.VirtualMachineScaleSetVM, error) {
getter := func(nodeName string) (string, string, *compute.VirtualMachineScaleSetVM, error) {
getter := func(nodeName string, crt cacheReadType) (string, string, *compute.VirtualMachineScaleSetVM, bool, error) {
var found bool
cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey, crt)
if err != nil {
return "", "", nil, err
return "", "", nil, found, err
}
virtualMachines := cached.(*sync.Map)
if vm, ok := virtualMachines.Load(nodeName); ok {
result := vm.(*vmssVirtualMachinesEntry)
return result.vmssName, result.instanceID, result.virtualMachine, nil
found = true
return result.vmssName, result.instanceID, result.virtualMachine, found, nil
}
return "", "", nil, nil
return "", "", nil, found, nil
}
_, err := getScaleSetVMInstanceID(nodeName)
@ -151,22 +153,24 @@ func (ss *scaleSet) getVmssVM(nodeName string, crt cacheReadType) (string, strin
return "", "", nil, err
}
vmssName, instanceID, vm, err := getter(nodeName)
vmssName, instanceID, vm, found, err := getter(nodeName, crt)
if err != nil {
return "", "", nil, err
}
if vm != nil {
if !found {
klog.V(2).Infof("Couldn't find VMSS VM with nodeName %s, refreshing the cache", nodeName)
vmssName, instanceID, vm, found, err = getter(nodeName, cacheReadTypeForceRefresh)
if err != nil {
return "", "", nil, err
}
}
if found && vm != nil {
return vmssName, instanceID, vm, nil
}
klog.V(3).Infof("Couldn't find VMSS VM with nodeName %s, refreshing the cache", nodeName)
ss.vmssVMCache.Delete(vmssVirtualMachinesKey)
vmssName, instanceID, vm, err = getter(nodeName)
if err != nil {
return "", "", nil, err
}
if vm == nil {
if !found || vm == nil {
return "", "", nil, cloudprovider.InstanceNotFound
}
return vmssName, instanceID, vm, nil
@ -197,7 +201,7 @@ func (ss *scaleSet) GetPowerStatusByNodeName(name string) (powerState string, er
// getVmssVMByInstanceID gets scaleSetVMInfo from cache.
// The node must belong to one of scale sets.
func (ss *scaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID string, crt cacheReadType) (*compute.VirtualMachineScaleSetVM, error) {
getter := func() (vm *compute.VirtualMachineScaleSetVM, found bool, err error) {
getter := func(crt cacheReadType) (vm *compute.VirtualMachineScaleSetVM, found bool, err error) {
cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey, crt)
if err != nil {
return nil, false, err
@ -220,21 +224,21 @@ func (ss *scaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceI
return vm, found, nil
}
vm, found, err := getter()
if err != nil {
return nil, err
}
if found {
return vm, nil
}
klog.V(3).Infof("Couldn't find VMSS VM with scaleSetName %q and instanceID %q, refreshing the cache", scaleSetName, instanceID)
ss.vmssVMCache.Delete(vmssVirtualMachinesKey)
vm, found, err = getter()
vm, found, err := getter(crt)
if err != nil {
return nil, err
}
if !found {
klog.V(2).Infof("Couldn't find VMSS VM with scaleSetName %q and instanceID %q, refreshing the cache", scaleSetName, instanceID)
vm, found, err = getter(cacheReadTypeForceRefresh)
if err != nil {
return nil, err
}
}
if found && vm != nil {
return vm, nil
}
if !found || vm == nil {
return nil, cloudprovider.InstanceNotFound
}

View File

@ -109,6 +109,26 @@ func (ss *scaleSet) newVMSSVirtualMachinesCache() (*timedCache, error) {
getter := func(key string) (interface{}, error) {
localCache := &sync.Map{} // [nodeName]*vmssVirtualMachinesEntry
oldCache := make(map[string]vmssVirtualMachinesEntry)
if ss.vmssVMCache != nil {
// get old cache before refreshing the cache
entry, exists, err := ss.vmssVMCache.store.GetByKey(vmssVirtualMachinesKey)
if err != nil {
return nil, err
}
if exists {
cached := entry.(*cacheEntry).data
if cached != nil {
virtualMachines := cached.(*sync.Map)
virtualMachines.Range(func(key, value interface{}) bool {
oldCache[key.(string)] = *value.(*vmssVirtualMachinesEntry)
return true
})
}
}
}
allResourceGroups, err := ss.GetResourceGroups()
if err != nil {
return nil, err
@ -141,8 +161,38 @@ func (ss *scaleSet) newVMSSVirtualMachinesCache() (*timedCache, error) {
virtualMachine: &vm,
lastUpdate: time.Now().UTC(),
})
if _, exists := oldCache[computerName]; exists {
delete(oldCache, computerName)
}
}
}
// add old missing cache data with nil entries to prevent aggressive
// ARM calls during cache invalidation
for name, vmEntry := range oldCache {
// if the nil cache entry has existed for 15 minutes in the cache
// then it should not be added back to the cache
if vmEntry.virtualMachine == nil && time.Since(vmEntry.lastUpdate) > 15*time.Minute {
klog.V(5).Infof("ignoring expired entries from old cache for %s", name)
continue
}
lastUpdate := time.Now().UTC()
if vmEntry.virtualMachine == nil {
// if this is already a nil entry then keep the time the nil
// entry was first created, so we can cleanup unwanted entries
lastUpdate = vmEntry.lastUpdate
}
klog.V(5).Infof("adding old entries to new cache for %s", name)
localCache.Store(name, &vmssVirtualMachinesEntry{
resourceGroup: vmEntry.resourceGroup,
vmssName: vmEntry.vmssName,
instanceID: vmEntry.instanceID,
virtualMachine: nil,
lastUpdate: lastUpdate,
})
}
}
return localCache, nil

44
vendor/modules.txt vendored
View File

@ -1106,9 +1106,9 @@ gopkg.in/square/go-jose.v2/json
gopkg.in/square/go-jose.v2/jwt
# gopkg.in/warnings.v0 v0.1.1
gopkg.in/warnings.v0
# gopkg.in/yaml.v2 v2.2.7
# gopkg.in/yaml.v2 v2.2.8
gopkg.in/yaml.v2
# k8s.io/api v0.17.0 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.17.2-k3s1
# k8s.io/api v0.17.0 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.17.3-k3s1
k8s.io/api/admission/v1
k8s.io/api/admission/v1beta1
k8s.io/api/admissionregistration/v1
@ -1152,7 +1152,7 @@ k8s.io/api/settings/v1alpha1
k8s.io/api/storage/v1
k8s.io/api/storage/v1alpha1
k8s.io/api/storage/v1beta1
# k8s.io/apiextensions-apiserver v0.17.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.17.2-k3s1
# k8s.io/apiextensions-apiserver v0.17.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.17.3-k3s1
k8s.io/apiextensions-apiserver/pkg/apihelpers
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install
@ -1200,7 +1200,7 @@ k8s.io/apiextensions-apiserver/pkg/generated/openapi
k8s.io/apiextensions-apiserver/pkg/registry/customresource
k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor
k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition
# k8s.io/apimachinery v0.17.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.17.2-k3s1
# k8s.io/apimachinery v0.17.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.17.3-k3s1
k8s.io/apimachinery/pkg/api/equality
k8s.io/apimachinery/pkg/api/errors
k8s.io/apimachinery/pkg/api/meta
@ -1262,7 +1262,7 @@ k8s.io/apimachinery/pkg/watch
k8s.io/apimachinery/third_party/forked/golang/json
k8s.io/apimachinery/third_party/forked/golang/netutil
k8s.io/apimachinery/third_party/forked/golang/reflect
# k8s.io/apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.17.2-k3s1
# k8s.io/apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.17.3-k3s1
k8s.io/apiserver/pkg/admission
k8s.io/apiserver/pkg/admission/configuration
k8s.io/apiserver/pkg/admission/initializer
@ -1379,7 +1379,7 @@ k8s.io/apiserver/plugin/pkg/authenticator/request/basicauth
k8s.io/apiserver/plugin/pkg/authenticator/token/oidc
k8s.io/apiserver/plugin/pkg/authenticator/token/webhook
k8s.io/apiserver/plugin/pkg/authorizer/webhook
# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.17.2-k3s1
# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.17.3-k3s1
k8s.io/cli-runtime/pkg/genericclioptions
k8s.io/cli-runtime/pkg/kustomize
k8s.io/cli-runtime/pkg/kustomize/k8sdeps
@ -1392,7 +1392,7 @@ k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch
k8s.io/cli-runtime/pkg/kustomize/k8sdeps/validator
k8s.io/cli-runtime/pkg/printers
k8s.io/cli-runtime/pkg/resource
# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.17.2-k3s1
# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.17.3-k3s1
k8s.io/client-go/discovery
k8s.io/client-go/discovery/cached
k8s.io/client-go/discovery/cached/disk
@ -1585,20 +1585,20 @@ k8s.io/client-go/util/jsonpath
k8s.io/client-go/util/keyutil
k8s.io/client-go/util/retry
k8s.io/client-go/util/workqueue
# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.17.2-k3s1
# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.17.3-k3s1
k8s.io/cloud-provider
k8s.io/cloud-provider/node/helpers
k8s.io/cloud-provider/service/helpers
k8s.io/cloud-provider/volume
k8s.io/cloud-provider/volume/errors
k8s.io/cloud-provider/volume/helpers
# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.17.2-k3s1
# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.17.3-k3s1
k8s.io/cluster-bootstrap/token/api
k8s.io/cluster-bootstrap/token/jws
k8s.io/cluster-bootstrap/token/util
k8s.io/cluster-bootstrap/util/secrets
k8s.io/cluster-bootstrap/util/tokens
# k8s.io/code-generator v0.17.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.17.2-k3s1
# k8s.io/code-generator v0.17.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.17.3-k3s1
k8s.io/code-generator/cmd/client-gen/args
k8s.io/code-generator/cmd/client-gen/generators
k8s.io/code-generator/cmd/client-gen/generators/fake
@ -1613,7 +1613,7 @@ k8s.io/code-generator/cmd/lister-gen/args
k8s.io/code-generator/cmd/lister-gen/generators
k8s.io/code-generator/pkg/namer
k8s.io/code-generator/pkg/util
# k8s.io/component-base v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.17.2-k3s1
# k8s.io/component-base v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.17.3-k3s1
k8s.io/component-base/cli/flag
k8s.io/component-base/cli/globalflag
k8s.io/component-base/codec
@ -1630,10 +1630,10 @@ k8s.io/component-base/metrics/prometheus/version
k8s.io/component-base/metrics/prometheus/workqueue
k8s.io/component-base/version
k8s.io/component-base/version/verflag
# k8s.io/cri-api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.17.2-k3s1
# k8s.io/cri-api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.17.3-k3s1
k8s.io/cri-api/pkg/apis
k8s.io/cri-api/pkg/apis/runtime/v1alpha2
# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.17.2-k3s1
# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.17.3-k3s1
k8s.io/csi-translation-lib
k8s.io/csi-translation-lib/plugins
# k8s.io/gengo v0.0.0-20191120174120-e74f70b9b27e
@ -1648,7 +1648,7 @@ k8s.io/gengo/types
k8s.io/heapster/metrics/api/v1/types
# k8s.io/klog v1.0.0
k8s.io/klog
# k8s.io/kube-aggregator v0.17.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.17.2-k3s1
# k8s.io/kube-aggregator v0.17.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.17.3-k3s1
k8s.io/kube-aggregator/pkg/apis/apiregistration
k8s.io/kube-aggregator/pkg/apis/apiregistration/install
k8s.io/kube-aggregator/pkg/apis/apiregistration/v1
@ -1676,7 +1676,7 @@ k8s.io/kube-aggregator/pkg/controllers/status
k8s.io/kube-aggregator/pkg/registry/apiservice
k8s.io/kube-aggregator/pkg/registry/apiservice/etcd
k8s.io/kube-aggregator/pkg/registry/apiservice/rest
# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.17.2-k3s1
# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.17.3-k3s1
k8s.io/kube-controller-manager/config/v1alpha1
# k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a
k8s.io/kube-openapi/pkg/aggregator
@ -1687,12 +1687,12 @@ k8s.io/kube-openapi/pkg/schemaconv
k8s.io/kube-openapi/pkg/util
k8s.io/kube-openapi/pkg/util/proto
k8s.io/kube-openapi/pkg/util/proto/validation
# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.17.2-k3s1
# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.17.3-k3s1
k8s.io/kube-proxy/config/v1alpha1
# k8s.io/kube-scheduler v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.17.2-k3s1
# k8s.io/kube-scheduler v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.17.3-k3s1
k8s.io/kube-scheduler/config/v1
k8s.io/kube-scheduler/config/v1alpha1
# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.17.2-k3s1
# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.17.3-k3s1
k8s.io/kubectl/pkg/apps
k8s.io/kubectl/pkg/cmd
k8s.io/kubectl/pkg/cmd/annotate
@ -1768,11 +1768,11 @@ k8s.io/kubectl/pkg/util/storage
k8s.io/kubectl/pkg/util/templates
k8s.io/kubectl/pkg/util/term
k8s.io/kubectl/pkg/validation
# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.17.2-k3s1
# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.17.3-k3s1
k8s.io/kubelet/config/v1beta1
k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1
k8s.io/kubelet/pkg/apis/pluginregistration/v1
# k8s.io/kubernetes v1.16.0 => github.com/rancher/kubernetes v1.17.2-k3s1
# k8s.io/kubernetes v1.16.0 => github.com/rancher/kubernetes v1.17.3-k3s1
k8s.io/kubernetes/cmd/cloud-controller-manager/app
k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config
k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config/scheme
@ -2510,7 +2510,7 @@ k8s.io/kubernetes/third_party/forked/gonum/graph
k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear
k8s.io/kubernetes/third_party/forked/gonum/graph/simple
k8s.io/kubernetes/third_party/forked/gonum/graph/traverse
# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.17.2-k3s1
# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.17.3-k3s1
k8s.io/legacy-cloud-providers/aws
k8s.io/legacy-cloud-providers/azure
k8s.io/legacy-cloud-providers/azure/auth
@ -2519,7 +2519,7 @@ k8s.io/legacy-cloud-providers/openstack
k8s.io/legacy-cloud-providers/vsphere
k8s.io/legacy-cloud-providers/vsphere/vclib
k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers
# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.17.2-k3s1
# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.17.3-k3s1
k8s.io/metrics/pkg/apis/custom_metrics
k8s.io/metrics/pkg/apis/custom_metrics/v1beta1
k8s.io/metrics/pkg/apis/custom_metrics/v1beta2