Update Kubernetes to v1.19.4-k3s1
Signed-off-by: Brad Davidson <brad.davidson@rancher.com>
Parent: 10474638d7
Commit: 5896fb06c2

go.mod (52 changed lines)

@@ -32,31 +32,31 @@ replace (
  google.golang.org/genproto => google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63
  google.golang.org/grpc => google.golang.org/grpc v1.26.0
  gopkg.in/square/go-jose.v2 => gopkg.in/square/go-jose.v2 v2.2.2
- k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.19.3-k3s1
- k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.19.3-k3s1
- k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.19.3-k3s1
- k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.19.3-k3s1
- k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.19.3-k3s1
- k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.19.3-k3s1
- k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.19.3-k3s1
- k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.19.3-k3s1
- k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.19.3-k3s1
- k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.19.3-k3s1
- k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.19.3-k3s1
- k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.19.3-k3s1
- k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.19.3-k3s1
- k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.19.3-k3s1
- k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.19.3-k3s1
- k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.19.3-k3s1
- k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.19.3-k3s1
- k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.19.3-k3s1
- k8s.io/kubernetes => github.com/rancher/kubernetes v1.19.3-k3s1
- k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.19.3-k3s1
- k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.19.3-k3s1
- k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.19.3-k3s1
- k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.19.3-k3s1
- k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.19.3-k3s1
- k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.19.3-k3s1
+ k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.19.4-k3s1
+ k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.19.4-k3s1
+ k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.19.4-k3s1
+ k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.19.4-k3s1
+ k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.19.4-k3s1
+ k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.19.4-k3s1
+ k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.19.4-k3s1
+ k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.19.4-k3s1
+ k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.19.4-k3s1
+ k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.19.4-k3s1
+ k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.19.4-k3s1
+ k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.19.4-k3s1
+ k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.19.4-k3s1
+ k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.19.4-k3s1
+ k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.19.4-k3s1
+ k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.19.4-k3s1
+ k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.19.4-k3s1
+ k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.19.4-k3s1
+ k8s.io/kubernetes => github.com/rancher/kubernetes v1.19.4-k3s1
+ k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.19.4-k3s1
+ k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.19.4-k3s1
+ k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.19.4-k3s1
+ k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.19.4-k3s1
+ k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.19.4-k3s1
+ k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.19.4-k3s1
  mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34
  )

@@ -113,6 +113,6 @@ require (
  k8s.io/component-base v0.19.0
  k8s.io/cri-api v0.19.0
  k8s.io/klog v1.0.0
- k8s.io/kubernetes v1.19.3
+ k8s.io/kubernetes v1.19.4
  sigs.k8s.io/yaml v1.2.0
  )

go.sum (86 changed lines)

@@ -673,49 +673,49 @@ github.com/rancher/helm-controller v0.7.3 h1:WTQHcNF2vl9w6Xd1eBtXDe0JUsYMFFstqX9
  github.com/rancher/helm-controller v0.7.3/go.mod h1:ZylsxIMGNADRPRNW+NiBWhrwwks9vnKLQiCHYWb6Bi0=
  github.com/rancher/kine v0.5.1 h1:pBAD9LVU3oCuRWzY6lKEVqhNi1nZlJV3bbD4G2mrE1c=
  github.com/rancher/kine v0.5.1/go.mod h1:NoqDMfN0Q+Wu23Kk3MfXfgLO2fE6abLaetejZs9HAYo=
- github.com/rancher/kubernetes v1.19.3-k3s1 h1:Tfr1qShnWaNGx4kyBSW5A9rvISgHjEg0KRvvZIV5Zpc=
- github.com/rancher/kubernetes v1.19.3-k3s1/go.mod h1:yhT1/ltQajQsha3tnYc9QPFYSumGM45nlZdjf7WqE1A=
- github.com/rancher/kubernetes/staging/src/k8s.io/api v1.19.3-k3s1 h1:+C1BPPjbCfFFcStBNUJ1gqIDYxdkvbKuZXm3CTQXFxY=
- github.com/rancher/kubernetes/staging/src/k8s.io/api v1.19.3-k3s1/go.mod h1:Y4VjjNur38HL6/QxaTVK2yno1zjEQlvcvwbbRQs2DtQ=
- github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.19.3-k3s1 h1:+KJuGNziYsqEW83VADyz9kjc+ekmpktzqdPYnEmxtss=
- github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.19.3-k3s1/go.mod h1:BvtZU215FgO19Oy19K6h8qwajFfjxYqGewgjuYHWGRw=
- github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.19.3-k3s1 h1:2NbzNTnTh8I8emvASDspv4dPwUBmbAC7aitpuYp32rM=
- github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.19.3-k3s1/go.mod h1:4qgwPPTQvmc3E4Ub+c6I9LSsdbujYP3pIQEGuIVy8oQ=
- github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.19.3-k3s1 h1:a6syigjFFawAUcEFeeXhRwaBG9Tl03hK4yC3MT6JUmQ=
- github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.19.3-k3s1/go.mod h1:lpK+uXhJTVOwW6SDiSQiL0LaQaBktrM23VG489uC/U0=
- github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.19.3-k3s1 h1:oh0VerzIG3uYnFLExwHnMYK0GF5O7vVQWZAi7FDE0x4=
- github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.19.3-k3s1/go.mod h1:twd45pbv7psOvyGxI8eABhpeoXWW3bCX6aB5NVS6TXs=
- github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.19.3-k3s1 h1:LBUD+DgcZsFqKzQtm4LLAj/e4qaPVepQNGT4PGCcFl0=
- github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.19.3-k3s1/go.mod h1:RHmuC9yMUmS4ypsaBCH2s9PoG2BD29/60QU9yywYWPo=
- github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.19.3-k3s1 h1:qEJ4M89Ov58KQnE9fJbiwa6423BQ2rdECc8+raY86jw=
- github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.19.3-k3s1/go.mod h1:vXBe7m69RSxIR/m6bm820O+WYUJHP9OXtEyTZf3twRo=
- github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.19.3-k3s1 h1:iOMP0NuFsbKZN1ndPQE+We9ssgUevqyJ/8mMDUiGrJM=
- github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.19.3-k3s1/go.mod h1:kEUR4nHaGTacDcHrY2P4IriUdykXxPuwBg7picxa+gk=
- github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.19.3-k3s1 h1:S3BTDilXxRnRpPGYMYIocfFNXyVpHREhmy8eJYIz6dk=
- github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.19.3-k3s1/go.mod h1:CqfZDv+BcdomYCAbV0kiIw9wyBW1Fnf/iDHBd0jEWW8=
- github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.19.3-k3s1 h1:tMyMGhwLnL3mimx9YN8T6uuPAZyv0hmYOdTgWG3Xi04=
- github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.19.3-k3s1/go.mod h1:jR+bJp7erYNUmcS7lWDm404aFVonltWE56LV8CuqKyg=
- github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.19.3-k3s1 h1:1eeJYqrjk28d+60wR+5/tmrAzz2OybjohX1T9QRFVwg=
- github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.19.3-k3s1/go.mod h1:5fTlfaqG0oYYb80kk4fHpqinBBSFEZb3EqHwdvNdiGQ=
- github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.19.3-k3s1 h1:RxVX1nPPdVfZVP/Viz3n1rd9/zU6CQICAhfNgDwNzN4=
- github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.19.3-k3s1/go.mod h1:adA9bBHXD+K7tPn7kTpjQ3VcUzK6PFgAdNEofr4fEx4=
- github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.19.3-k3s1 h1:ZMQsAv6ib3rOvRS0eeuG7IpNfjH6V4Jb0Xt3rwFo/5M=
- github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.19.3-k3s1/go.mod h1:2k5w33TDDiX5MW45QFLk1KqByb4Su/g73InDkoC1xls=
- github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.19.3-k3s1 h1:ceFawy3+DdCrTCuIr90VJU+ALvnUdOxTewgngJWXkmI=
- github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.19.3-k3s1/go.mod h1:6CzB8GMVD+ZlCXZjHiqccHpC9FFlRTCz+rHd176Lxfc=
- github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.19.3-k3s1 h1:xwYRs1y6M993NcixnQPlv8p++T7mzCUGGcMuH0GBOh8=
- github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.19.3-k3s1/go.mod h1:PWMBqO9xuXWJS8REJ8QWiouJzbiOwVVVT81ZTwYb2Nk=
- github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.19.3-k3s1 h1:gv249AZOI7rNXj0ODJB2sqwirYG4+4Rczp3RIltGwWc=
- github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.19.3-k3s1/go.mod h1:23iT4D9thFRxYGZ9DOa7zQ4ZyrFHyp+Bz5dXiruHNwI=
- github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.19.3-k3s1 h1:/YLM38Ve+OsLaVwli6uSn/CnisDRYS6KmDBMO/IC4nM=
- github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.19.3-k3s1/go.mod h1:jF57QafZUIyhkboHp3NUlPdpozl8NA4uvyG5FdjbEbU=
- github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.19.3-k3s1 h1:nQN+E86Ar9omkk3fSXHQREEpAW/lUHAECzU8jxy3iyM=
- github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.19.3-k3s1/go.mod h1:xgsxxK8T8eljLwrh87vAyytjDFMbBsrM95vgHrf1OUg=
- github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.19.3-k3s1 h1:9q1Ek7H0/ESteqgdeAYvnonnNeKfii8xrD7eHtLcR9M=
- github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.19.3-k3s1/go.mod h1:iv4u51XYDkRdyvp7BBP+KuQ+ZHjEjoCECFVzMJBDGZA=
- github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.19.3-k3s1 h1:doNDlT3AR7qoN0l3DOqSQeXhZ1si7lKsa/smvPlXX20=
- github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.19.3-k3s1/go.mod h1:HZHgmugH8oA4ZxTPt9R8HYJBWEeLDegmaGoPo3HDK3I=
- github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.19.3-k3s1/go.mod h1:tl3x2SPSVsIqnioD4z87jXFemilbRh1EYDm3KirMsjI=
+ github.com/rancher/kubernetes v1.19.4-k3s1 h1:x+DMUJas/ubT2SW0369d9mERNrICEiCveRq7D9+8wqI=
+ github.com/rancher/kubernetes v1.19.4-k3s1/go.mod h1:yhT1/ltQajQsha3tnYc9QPFYSumGM45nlZdjf7WqE1A=
+ github.com/rancher/kubernetes/staging/src/k8s.io/api v1.19.4-k3s1 h1:S/5pyKhFkgTMGufa/wL4tD365Wo2otBN4cbI836z83s=
+ github.com/rancher/kubernetes/staging/src/k8s.io/api v1.19.4-k3s1/go.mod h1:Y4VjjNur38HL6/QxaTVK2yno1zjEQlvcvwbbRQs2DtQ=
+ github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.19.4-k3s1 h1:D30UvqLyW0tMWjtjLCAaYWZOdfCuZq5xj/65KuCvgdo=
+ github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.19.4-k3s1/go.mod h1:BvtZU215FgO19Oy19K6h8qwajFfjxYqGewgjuYHWGRw=
+ github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.19.4-k3s1 h1:0ZsBwkNlJsN43kTejquiur28TEd0bMrlnul0vL7C7oM=
+ github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.19.4-k3s1/go.mod h1:4qgwPPTQvmc3E4Ub+c6I9LSsdbujYP3pIQEGuIVy8oQ=
+ github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.19.4-k3s1 h1:V6zGrPHSalrVXyDTjQ+eg01jVuSD9wexnCZaZklvhGM=
+ github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.19.4-k3s1/go.mod h1:lpK+uXhJTVOwW6SDiSQiL0LaQaBktrM23VG489uC/U0=
+ github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.19.4-k3s1 h1:vYXjNFj6V/UbQe2/YP8XSvhNFO7Sq8oFRVJRxwvj/y0=
+ github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.19.4-k3s1/go.mod h1:twd45pbv7psOvyGxI8eABhpeoXWW3bCX6aB5NVS6TXs=
+ github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.19.4-k3s1 h1:+NYTDYq+L5h8qVIhKH5E5ETlf1S3or8cVzBzr3oCZ1o=
+ github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.19.4-k3s1/go.mod h1:RHmuC9yMUmS4ypsaBCH2s9PoG2BD29/60QU9yywYWPo=
+ github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.19.4-k3s1 h1:qRqfJhJsam9YvDQW+q11LBRRb/NtjpRwGN80HhiXZKo=
+ github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.19.4-k3s1/go.mod h1:vXBe7m69RSxIR/m6bm820O+WYUJHP9OXtEyTZf3twRo=
+ github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.19.4-k3s1 h1:nzW6wYVzpBESgTHKIyD0Sovfg5EKmIPmeL9ypydJjl4=
+ github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.19.4-k3s1/go.mod h1:kEUR4nHaGTacDcHrY2P4IriUdykXxPuwBg7picxa+gk=
+ github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.19.4-k3s1 h1:l+TzKc9mZ6N6GWzD47aaQ7juQ0Et3v5PpCuTlRJqquY=
+ github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.19.4-k3s1/go.mod h1:CqfZDv+BcdomYCAbV0kiIw9wyBW1Fnf/iDHBd0jEWW8=
+ github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.19.4-k3s1 h1:z+B1xn2iH2nqpQIJZWK+uoTkR9i1KrT/qye12rdyhg0=
+ github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.19.4-k3s1/go.mod h1:jR+bJp7erYNUmcS7lWDm404aFVonltWE56LV8CuqKyg=
+ github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.19.4-k3s1 h1:/X2Vs7rOURDyTmy8aEUZP6X8u7k0/kjmA5yzasExzS4=
+ github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.19.4-k3s1/go.mod h1:5fTlfaqG0oYYb80kk4fHpqinBBSFEZb3EqHwdvNdiGQ=
+ github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.19.4-k3s1 h1:pcRcwhEQXf80Ul9EIS2gZdDEuzZlndPNkDdundaIC80=
+ github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.19.4-k3s1/go.mod h1:adA9bBHXD+K7tPn7kTpjQ3VcUzK6PFgAdNEofr4fEx4=
+ github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.19.4-k3s1 h1:eMc7KHot49rvjqHQ/LESZ5MF2nkLj3egrD7yA8vvsm0=
+ github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.19.4-k3s1/go.mod h1:2k5w33TDDiX5MW45QFLk1KqByb4Su/g73InDkoC1xls=
+ github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.19.4-k3s1 h1:rO/v0Bm9Om7CaWbFzkpOHSoPdPFjoab6Taxc9HgJ8uk=
+ github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.19.4-k3s1/go.mod h1:6CzB8GMVD+ZlCXZjHiqccHpC9FFlRTCz+rHd176Lxfc=
+ github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.19.4-k3s1 h1:4GAkuGnbtclBAXhOZ2sgJrZqHYXxQD8BWVSKvNvqUkc=
+ github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.19.4-k3s1/go.mod h1:PWMBqO9xuXWJS8REJ8QWiouJzbiOwVVVT81ZTwYb2Nk=
+ github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.19.4-k3s1 h1:0s8+hiskstxO4lcoOfjfzcdt3ayOoawqdSDnERL5SPs=
+ github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.19.4-k3s1/go.mod h1:23iT4D9thFRxYGZ9DOa7zQ4ZyrFHyp+Bz5dXiruHNwI=
+ github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.19.4-k3s1 h1:/RzUe48y9OJoQ1HOPhGyLc8n4yzgpqAu1HmILIQ3pHg=
+ github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.19.4-k3s1/go.mod h1:jF57QafZUIyhkboHp3NUlPdpozl8NA4uvyG5FdjbEbU=
+ github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.19.4-k3s1 h1:LCrkyxAiW875XzxpN4dE0i04Ij1aGYdoeLICd+pdwDY=
+ github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.19.4-k3s1/go.mod h1:xgsxxK8T8eljLwrh87vAyytjDFMbBsrM95vgHrf1OUg=
+ github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.19.4-k3s1 h1:FtPvH0+DNEKj0p1Wm8A9NUGRC7C9t1xmlQOkipwJBDk=
+ github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.19.4-k3s1/go.mod h1:iv4u51XYDkRdyvp7BBP+KuQ+ZHjEjoCECFVzMJBDGZA=
+ github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.19.4-k3s1 h1:J8QwGeA4QhyDt7exJrqk1ZbU8zOe7G9Afwo7xhnvLF8=
+ github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.19.4-k3s1/go.mod h1:HZHgmugH8oA4ZxTPt9R8HYJBWEeLDegmaGoPo3HDK3I=
+ github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.19.4-k3s1/go.mod h1:tl3x2SPSVsIqnioD4z87jXFemilbRh1EYDm3KirMsjI=
  github.com/rancher/moq v0.0.0-20190404221404-ee5226d43009/go.mod h1:wpITyDPTi/Na5h73XkbuEf2AP9fbgrIGqqxVzFhYD6U=
  github.com/rancher/remotedialer v0.2.0 h1:xD7t3K6JYwTdAsxmGtTHQMkEkFgKouQ1foLxVW424Dc=
  github.com/rancher/remotedialer v0.2.0/go.mod h1:tkU8ZvrR5lRgaKWaX71nAy6daeqvPFx/lJEnbW7tXSI=

vendor/k8s.io/client-go/pkg/version/base.go (generated, vendored; 6 changed lines)

@@ -3,8 +3,8 @@ package version
  var (
  gitMajor = "1"
  gitMinor = "19"
- gitVersion = "v1.19.3-k3s1"
- gitCommit = "a38be968fd7010255807381f6d846c4082a838db"
+ gitVersion = "v1.19.4-k3s1"
+ gitCommit = "6a36e936c20163fe7c4d601ef0e38b5f711f6f1a"
  gitTreeState = "clean"
- buildDate = "2020-10-14T17:54:23Z"
+ buildDate = "2020-11-14T08:02:16Z"
  )

vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go (generated, vendored; 6 changed lines)

@@ -290,8 +290,12 @@ func (le *LeaderElector) release() bool {
  if !le.IsLeader() {
  return true
  }
+ now := metav1.Now()
  leaderElectionRecord := rl.LeaderElectionRecord{
- LeaderTransitions: le.observedRecord.LeaderTransitions,
+ LeaderTransitions: le.observedRecord.LeaderTransitions,
+ LeaseDurationSeconds: 1,
+ RenewTime: now,
+ AcquireTime: now,
  }
  if err := le.config.Lock.Update(context.TODO(), leaderElectionRecord); err != nil {
  klog.Errorf("Failed to release lock: %v", err)

vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go (generated, vendored; 8 changed lines)

@@ -92,8 +92,12 @@ func (cml *ConfigMapLock) Update(ctx context.Context, ler LeaderElectionRecord)
  cml.cm.Annotations = make(map[string]string)
  }
  cml.cm.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes)
- cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Update(ctx, cml.cm, metav1.UpdateOptions{})
- return err
+ cm, err := cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Update(ctx, cml.cm, metav1.UpdateOptions{})
+ if err != nil {
+ return err
+ }
+ cml.cm = cm
+ return nil
  }

  // RecordEvent in leader election while adding meta-data

vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go (generated, vendored; 8 changed lines)

@@ -87,8 +87,12 @@ func (el *EndpointsLock) Update(ctx context.Context, ler LeaderElectionRecord) e
  el.e.Annotations = make(map[string]string)
  }
  el.e.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes)
- el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Update(ctx, el.e, metav1.UpdateOptions{})
- return err
+ e, err := el.Client.Endpoints(el.EndpointsMeta.Namespace).Update(ctx, el.e, metav1.UpdateOptions{})
+ if err != nil {
+ return err
+ }
+ el.e = e
+ return nil
  }

  // RecordEvent in leader election while adding meta-data

vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go (generated, vendored; 11 changed lines)

@@ -71,9 +71,14 @@ func (ll *LeaseLock) Update(ctx context.Context, ler LeaderElectionRecord) error
  return errors.New("lease not initialized, call get or create first")
  }
  ll.lease.Spec = LeaderElectionRecordToLeaseSpec(&ler)
- var err error
- ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Update(ctx, ll.lease, metav1.UpdateOptions{})
- return err
+
+ lease, err := ll.Client.Leases(ll.LeaseMeta.Namespace).Update(ctx, ll.lease, metav1.UpdateOptions{})
+ if err != nil {
+ return err
+ }
+
+ ll.lease = lease
+ return nil
  }

  // RecordEvent in leader election while adding meta-data
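
The three resourcelock hunks above (ConfigMap, Endpoints, Lease) apply the same pattern: the object returned by the server is written back into the lock's local cache only after the Update call succeeds, so a failed update no longer clobbers the previously cached object. A minimal sketch of that pattern, not the client-go implementation; the names fakeStore, Record and lock are invented for illustration:

// Sketch: persist the server's response locally only on success.
package main

import (
	"errors"
	"fmt"
)

type Record struct{ Holder string }

type fakeStore struct{ fail bool }

// Update pretends to write the record to an API server.
func (s *fakeStore) Update(r Record) (Record, error) {
	if s.fail {
		return Record{}, errors.New("server rejected update")
	}
	return r, nil
}

type lock struct {
	store  *fakeStore
	cached Record // last object successfully written to the server
}

func (l *lock) Update(r Record) error {
	updated, err := l.store.Update(r)
	if err != nil {
		return err // keep the previous cached copy untouched
	}
	l.cached = updated // only overwrite the cache on success
	return nil
}

func main() {
	l := &lock{store: &fakeStore{}}
	_ = l.Update(Record{Holder: "node-a"})
	l.store.fail = true
	if err := l.Update(Record{Holder: "node-b"}); err != nil {
		fmt.Println("update failed; cached holder is still", l.cached.Holder)
	}
}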

vendor/k8s.io/client-go/transport/cache.go (generated, vendored; 47 changed lines)

@@ -47,12 +47,9 @@ type tlsCacheKey struct {
  keyData string
  certFile string
  keyFile string
- getCert string
  serverName string
  nextProtos string
- dial string
  disableCompression bool
- proxy string
  }

  func (t tlsCacheKey) String() string {

@@ -60,22 +57,24 @@ func (t tlsCacheKey) String() string {
  if len(t.keyData) > 0 {
  keyText = "<redacted>"
  }
- return fmt.Sprintf("insecure:%v, caData:%#v, certData:%#v, keyData:%s, getCert: %s, serverName:%s, dial:%s disableCompression:%t, proxy: %s", t.insecure, t.caData, t.certData, keyText, t.getCert, t.serverName, t.dial, t.disableCompression, t.proxy)
+ return fmt.Sprintf("insecure:%v, caData:%#v, certData:%#v, keyData:%s, serverName:%s, disableCompression:%t", t.insecure, t.caData, t.certData, keyText, t.serverName, t.disableCompression)
  }

  func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) {
- key, err := tlsConfigKey(config)
+ key, canCache, err := tlsConfigKey(config)
  if err != nil {
  return nil, err
  }

- // Ensure we only create a single transport for the given TLS options
- c.mu.Lock()
- defer c.mu.Unlock()
+ if canCache {
+ // Ensure we only create a single transport for the given TLS options
+ c.mu.Lock()
+ defer c.mu.Unlock()

- // See if we already have a custom transport for this config
- if t, ok := c.transports[key]; ok {
- return t, nil
+ // See if we already have a custom transport for this config
+ if t, ok := c.transports[key]; ok {
+ return t, nil
+ }
  }

  // Get the TLS options for this client config

@@ -110,8 +109,7 @@ func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) {
  proxy = config.Proxy
  }

- // Cache a single transport for these options
- c.transports[key] = utilnet.SetTransportDefaults(&http.Transport{
+ transport := utilnet.SetTransportDefaults(&http.Transport{
  Proxy: proxy,
  TLSHandshakeTimeout: 10 * time.Second,
  TLSClientConfig: tlsConfig,

@@ -119,24 +117,33 @@ func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) {
  DialContext: dial,
  DisableCompression: config.DisableCompression,
  })
- return c.transports[key], nil
+
+ if canCache {
+ // Cache a single transport for these options
+ c.transports[key] = transport
+ }
+
+ return transport, nil
  }

  // tlsConfigKey returns a unique key for tls.Config objects returned from TLSConfigFor
- func tlsConfigKey(c *Config) (tlsCacheKey, error) {
+ func tlsConfigKey(c *Config) (tlsCacheKey, bool, error) {
  // Make sure ca/key/cert content is loaded
  if err := loadTLSFiles(c); err != nil {
- return tlsCacheKey{}, err
+ return tlsCacheKey{}, false, err
  }
+
+ if c.TLS.GetCert != nil || c.Dial != nil || c.Proxy != nil {
+ // cannot determine equality for functions
+ return tlsCacheKey{}, false, nil
+ }
+
  k := tlsCacheKey{
  insecure: c.TLS.Insecure,
  caData: string(c.TLS.CAData),
- getCert: fmt.Sprintf("%p", c.TLS.GetCert),
  serverName: c.TLS.ServerName,
  nextProtos: strings.Join(c.TLS.NextProtos, ","),
- dial: fmt.Sprintf("%p", c.Dial),
  disableCompression: c.DisableCompression,
- proxy: fmt.Sprintf("%p", c.Proxy),
  }

  if c.TLS.ReloadTLSFiles {

@@ -147,5 +154,5 @@ func tlsConfigKey(c *Config) (tlsCacheKey, error) {
  k.keyData = string(c.TLS.KeyData)
  }

- return k, nil
+ return k, true, nil
  }
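
The cache.go change works around a basic Go constraint: map keys must be comparable, and function values (GetCert, Dial, Proxy) are not meaningfully comparable, so configs carrying closures cannot be keyed reliably. The new tlsConfigKey therefore reports canCache=false for such configs and the transport is built uncached. An illustrative sketch of the same idea, with invented names (config, cacheKey, keyFor), not the client-go API:

// Sketch: configs with function-valued options are never used as cache keys.
package main

import (
	"fmt"
	"net"
)

type config struct {
	serverName string
	dial       func(network, addr string) (net.Conn, error)
}

type cacheKey struct{ serverName string }

// keyFor returns a comparable key and whether the config is safe to cache.
func keyFor(c config) (cacheKey, bool) {
	if c.dial != nil {
		// Two different closures may behave identically, or one pointer may
		// capture different state over time; neither is detectable here.
		return cacheKey{}, false
	}
	return cacheKey{serverName: c.serverName}, true
}

func main() {
	cache := map[cacheKey]string{}

	plain := config{serverName: "example.internal"}
	if k, ok := keyFor(plain); ok {
		cache[k] = "cached transport"
	}

	custom := config{
		serverName: "example.internal",
		dial:       func(n, a string) (net.Conn, error) { return net.Dial(n, a) },
	}
	if _, ok := keyFor(custom); !ok {
		fmt.Println("custom dialer: build a one-off transport and skip the cache")
	}
	fmt.Println("cached entries:", len(cache))
}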

vendor/k8s.io/cloud-provider/controllers/node/node_controller.go (generated, vendored; 9 changed lines)

@@ -414,7 +414,10 @@ func (cnc *CloudNodeController) getNodeModifiersFromCloudProvider(ctx context.Co
  err error
  )

- if node.Spec.ProviderID == "" {
+ // skip the provider ID check for InstancesV2, GetInstanceProviderID only builds a ProviderID for Instances
+ // we will set up providerID for InstancesV2 if instanceMeta has non-empty providerID
+ _, instancesV2Enabled := cnc.cloud.InstancesV2()
+ if node.Spec.ProviderID == "" && !instancesV2Enabled {
  providerID, err = cloudprovider.GetInstanceProviderID(ctx, cnc.cloud, types.NodeName(node.Name))
  if err == nil {
  nodeModifiers = append(nodeModifiers, func(n *v1.Node) {

@@ -466,6 +469,10 @@ func (cnc *CloudNodeController) getNodeModifiersFromCloudProvider(ctx context.Co
  return nil, err
  }

+ if node.Spec.ProviderID == "" && instanceMeta.ProviderID != "" {
+ nodeModifiers = append(nodeModifiers, func(n *v1.Node) { n.Spec.ProviderID = instanceMeta.ProviderID })
+ }
+
  // If user provided an IP address, ensure that IP address is found
  // in the cloud provider before removing the taint on the node
  if nodeIP, ok := ensureNodeProvidedIPExists(node, instanceMeta.NodeAddresses); ok && nodeIP == nil {

vendor/k8s.io/component-base/version/base.go (generated, vendored; 6 changed lines)

@@ -3,8 +3,8 @@ package version
  var (
  gitMajor = "1"
  gitMinor = "19"
- gitVersion = "v1.19.3-k3s1"
- gitCommit = "a38be968fd7010255807381f6d846c4082a838db"
+ gitVersion = "v1.19.4-k3s1"
+ gitCommit = "6a36e936c20163fe7c4d601ef0e38b5f711f6f1a"
  gitTreeState = "clean"
- buildDate = "2020-10-14T17:54:23Z"
+ buildDate = "2020-11-14T08:02:16Z"
  )

vendor/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go (generated, vendored; 55 changed lines)

@@ -86,6 +86,53 @@ type AvailableConditionController struct {
  cache map[string]map[string][]string
  // this lock protects operations on the above cache
  cacheLock sync.RWMutex
+
+ // TLS config with customized dialer cannot be cached by the client-go
+ // tlsTransportCache. Use a local cache here to reduce the chance of
+ // the controller spamming idle connections with short-lived transports.
+ // NOTE: the cache works because we assume that the transports constructed
+ // by the controller only vary on the dynamic cert/key.
+ tlsCache *tlsTransportCache
  }
+
+ type tlsTransportCache struct {
+ mu sync.Mutex
+ transports map[tlsCacheKey]http.RoundTripper
+ }
+
+ func (c *tlsTransportCache) get(config *rest.Config) (http.RoundTripper, error) {
+ // If the available controller doesn't customzie the dialer (and we know from
+ // the code that the controller doesn't customzie other functions i.e. Proxy
+ // and GetCert (ExecProvider)), the config is cacheable by the client-go TLS
+ // transport cache. Let's skip the local cache and depend on the client-go cache.
+ if config.Dial == nil {
+ return rest.TransportFor(config)
+ }
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ // See if we already have a custom transport for this config
+ key := tlsConfigKey(config)
+ if t, ok := c.transports[key]; ok {
+ return t, nil
+ }
+ restTransport, err := rest.TransportFor(config)
+ if err != nil {
+ return nil, err
+ }
+ c.transports[key] = restTransport
+ return restTransport, nil
+ }
+
+ type tlsCacheKey struct {
+ certData string
+ keyData string
+ }
+
+ func tlsConfigKey(c *rest.Config) tlsCacheKey {
+ return tlsCacheKey{
+ certData: string(c.TLSClientConfig.CertData),
+ keyData: string(c.TLSClientConfig.KeyData),
+ }
+ }

  // NewAvailableConditionController returns a new AvailableConditionController.

@@ -115,6 +162,7 @@ func NewAvailableConditionController(
  workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 30*time.Second),
  "AvailableConditionController"),
  proxyCurrentCertKeyContent: proxyCurrentCertKeyContent,
+ tlsCache: &tlsTransportCache{transports: make(map[tlsCacheKey]http.RoundTripper)},
  }

  if egressSelector != nil {

@@ -185,7 +233,12 @@ func (c *AvailableConditionController) sync(key string) error {
  if c.dialContext != nil {
  restConfig.Dial = c.dialContext
  }
- restTransport, err := rest.TransportFor(restConfig)
+ // TLS config with customized dialer cannot be cached by the client-go
+ // tlsTransportCache. Use a local cache here to reduce the chance of
+ // the controller spamming idle connections with short-lived transports.
+ // NOTE: the cache works because we assume that the transports constructed
+ // by the controller only vary on the dynamic cert/key.
+ restTransport, err := c.tlsCache.get(restConfig)
  if err != nil {
  return err
  }

vendor/k8s.io/kubectl/pkg/describe/describe.go (generated, vendored; 4 changed lines)

@@ -2465,7 +2465,7 @@ func (i *IngressDescriber) describeBackendV1(ns string, backend *networkingv1.In
  }
  }
  ep := formatEndpoints(endpoints, sets.NewString(spName))
- return fmt.Sprintf("%s\t %s)", sb, ep)
+ return fmt.Sprintf("%s (%s)", sb, ep)
  }
  if backend.Resource != nil {
  ic := backend.Resource

@@ -2518,7 +2518,7 @@ func (i *IngressDescriber) describeIngressV1(ing *networkingv1.Ingress, events *
  }
  }
  if count == 0 {
- w.Write(LEVEL_1, "\t%s %s\n", "*", "*", i.describeBackendV1(ns, def))
+ w.Write(LEVEL_1, "%s\t%s\t%s\n", "*", "*", i.describeBackendV1(ns, def))
  }
  printAnnotationsMultiline(w, "Annotations", ing.Annotations)

vendor/k8s.io/kubectl/pkg/drain/drain.go (generated, vendored; 1 changed line)

@@ -318,7 +318,6 @@ func (d *Helper) evictPods(pods []corev1.Pod, policyGroupVersion string, getPodF
  if err != nil {
  errors = append(errors, err)
  }
- default:
  }
  }

vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/server.go (generated, vendored; 11 changed lines)

@@ -700,8 +700,17 @@ func Complete(s *options.ServerRunOptions) (completedServerRunOptions, error) {
  }

  if s.Etcd.EnableWatchCache {
+ sizes := kubeapiserver.DefaultWatchCacheSizes()
  // Ensure that overrides parse correctly.
- if _, err := serveroptions.ParseWatchCacheSizes(s.Etcd.WatchCacheSizes); err != nil {
+ userSpecified, err := serveroptions.ParseWatchCacheSizes(s.Etcd.WatchCacheSizes)
+ if err != nil {
  return options, err
  }
+ for resource, size := range userSpecified {
+ sizes[resource] = size
+ }
+ s.Etcd.WatchCacheSizes, err = serveroptions.WriteWatchCacheSizes(sizes)
+ if err != nil {
+ return options, err
+ }
  }
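
The server.go hunk above no longer just validates --watch-cache-sizes: it starts from the new per-resource defaults and overlays whatever the operator specified, so an explicit flag value always wins over a default. A small sketch of that overlay, using illustrative resource names and plain string keys rather than the real schema.GroupResource type:

// Sketch: defaults first, user-specified sizes override per resource.
package main

import "fmt"

func defaultSizes() map[string]int {
	// watch cache disabled (size 0) for high-churn resources by default
	return map[string]int{"events": 0, "events.k8s.io/events": 0}
}

func mergeSizes(userSpecified map[string]int) map[string]int {
	sizes := defaultSizes()
	for resource, size := range userSpecified {
		sizes[resource] = size // the operator's choice always wins
	}
	return sizes
}

func main() {
	merged := mergeSizes(map[string]int{"pods": 1000, "events": 100})
	fmt.Println(merged["events"], merged["pods"], merged["events.k8s.io/events"]) // 100 1000 0
}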

vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/core.go (generated, vendored; 1 changed line)

@@ -359,7 +359,6 @@ func startVolumeExpandController(ctx ControllerContext) (http.Handler, bool, err
  ctx.ClientBuilder.ClientOrDie("expand-controller"),
  ctx.InformerFactory.Core().V1().PersistentVolumeClaims(),
  ctx.InformerFactory.Core().V1().PersistentVolumes(),
- ctx.InformerFactory.Storage().V1().StorageClasses(),
  ctx.Cloud,
  plugins,
  csiTranslator,

vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal.go (generated, vendored; 8 changed lines)

@@ -998,15 +998,17 @@ func calculateScaleUpLimitWithScalingRules(currentReplicas int32, scaleEvents []
  if *scalingRules.SelectPolicy == autoscalingv2.DisabledPolicySelect {
  return currentReplicas // Scaling is disabled
  } else if *scalingRules.SelectPolicy == autoscalingv2.MinPolicySelect {
+ result = math.MaxInt32
  selectPolicyFn = min // For scaling up, the lowest change ('min' policy) produces a minimum value
  } else {
+ result = math.MinInt32
  selectPolicyFn = max // Use the default policy otherwise to produce a highest possible change
  }
  for _, policy := range scalingRules.Policies {
  replicasAddedInCurrentPeriod := getReplicasChangePerPeriod(policy.PeriodSeconds, scaleEvents)
  periodStartReplicas := currentReplicas - replicasAddedInCurrentPeriod
  if policy.Type == autoscalingv2.PodsScalingPolicy {
- proposed = int32(periodStartReplicas + policy.Value)
+ proposed = periodStartReplicas + policy.Value
  } else if policy.Type == autoscalingv2.PercentScalingPolicy {
  // the proposal has to be rounded up because the proposed change might not increase the replica count causing the target to never scale up
  proposed = int32(math.Ceil(float64(periodStartReplicas) * (1 + float64(policy.Value)/100)))

@@ -1018,14 +1020,16 @@

  // calculateScaleDownLimitWithBehavior returns the maximum number of pods that could be deleted for the given HPAScalingRules
  func calculateScaleDownLimitWithBehaviors(currentReplicas int32, scaleEvents []timestampedScaleEvent, scalingRules *autoscalingv2.HPAScalingRules) int32 {
- var result int32 = math.MaxInt32
+ var result int32
  var proposed int32
  var selectPolicyFn func(int32, int32) int32
  if *scalingRules.SelectPolicy == autoscalingv2.DisabledPolicySelect {
  return currentReplicas // Scaling is disabled
  } else if *scalingRules.SelectPolicy == autoscalingv2.MinPolicySelect {
+ result = math.MinInt32
  selectPolicyFn = max // For scaling down, the lowest change ('min' policy) produces a maximum value
  } else {
+ result = math.MaxInt32
  selectPolicyFn = min // Use the default policy otherwise to produce a highest possible change
  }
  for _, policy := range scalingRules.Policies {
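
The two HPA hunks above fix the accumulator's starting value per select policy: when proposals are folded with min() the running result must start at MaxInt32, and when folded with max() it must start at MinInt32, otherwise the first comparison can never win. A minimal sketch of that folding rule, with illustrative proposal values and helper names:

// Sketch: the sentinel start value has to match the fold function.
package main

import (
	"fmt"
	"math"
)

func min(a, b int32) int32 {
	if a < b {
		return a
	}
	return b
}

func max(a, b int32) int32 {
	if a > b {
		return a
	}
	return b
}

// pickLimit folds per-policy proposals with the chosen select function.
func pickLimit(proposals []int32, useMin bool) int32 {
	var result int32
	var selectFn func(int32, int32) int32
	if useMin {
		result, selectFn = math.MaxInt32, min // min needs a high sentinel
	} else {
		result, selectFn = math.MinInt32, max // max needs a low sentinel
	}
	for _, p := range proposals {
		result = selectFn(result, p)
	}
	return result
}

func main() {
	proposals := []int32{12, 8, 20}
	fmt.Println("Min policy:", pickLimit(proposals, true))  // 8
	fmt.Println("Max policy:", pickLimit(proposals, false)) // 20
}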

vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/replica_calculator.go (generated, vendored; 32 changed lines)

@@ -76,8 +76,9 @@ func (c *ReplicaCalculator) GetResourceReplicas(currentReplicas int32, targetUti
  return 0, 0, 0, time.Time{}, fmt.Errorf("no pods returned by selector while calculating replica count")
  }

- readyPodCount, ignoredPods, missingPods := groupPods(podList, metrics, resource, c.cpuInitializationPeriod, c.delayOfInitialReadinessStatus)
+ readyPodCount, unreadyPods, missingPods, ignoredPods := groupPods(podList, metrics, resource, c.cpuInitializationPeriod, c.delayOfInitialReadinessStatus)
  removeMetricsForPods(metrics, ignoredPods)
+ removeMetricsForPods(metrics, unreadyPods)
  requests, err := calculatePodRequests(podList, resource)
  if err != nil {
  return 0, 0, 0, time.Time{}, err

@@ -92,7 +93,7 @@ func (c *ReplicaCalculator) GetResourceReplicas(currentReplicas int32, targetUti
  return 0, 0, 0, time.Time{}, err
  }

- rebalanceIgnored := len(ignoredPods) > 0 && usageRatio > 1.0
+ rebalanceIgnored := len(unreadyPods) > 0 && usageRatio > 1.0
  if !rebalanceIgnored && len(missingPods) == 0 {
  if math.Abs(1.0-usageRatio) <= c.tolerance {
  // return the current replicas if the change would be too small

@@ -119,7 +120,7 @@ func (c *ReplicaCalculator) GetResourceReplicas(currentReplicas int32, targetUti

  if rebalanceIgnored {
  // on a scale-up, treat unready pods as using 0% of the resource request
- for podName := range ignoredPods {
+ for podName := range unreadyPods {
  metrics[podName] = metricsclient.PodMetric{Value: 0}
  }
  }

@@ -184,8 +185,9 @@ func (c *ReplicaCalculator) calcPlainMetricReplicas(metrics metricsclient.PodMet
  return 0, 0, fmt.Errorf("no pods returned by selector while calculating replica count")
  }

- readyPodCount, ignoredPods, missingPods := groupPods(podList, metrics, resource, c.cpuInitializationPeriod, c.delayOfInitialReadinessStatus)
+ readyPodCount, unreadyPods, missingPods, ignoredPods := groupPods(podList, metrics, resource, c.cpuInitializationPeriod, c.delayOfInitialReadinessStatus)
  removeMetricsForPods(metrics, ignoredPods)
+ removeMetricsForPods(metrics, unreadyPods)

  if len(metrics) == 0 {
  return 0, 0, fmt.Errorf("did not receive metrics for any ready pods")

@@ -193,7 +195,7 @@ func (c *ReplicaCalculator) calcPlainMetricReplicas(metrics metricsclient.PodMet

  usageRatio, utilization := metricsclient.GetMetricUtilizationRatio(metrics, targetUtilization)

- rebalanceIgnored := len(ignoredPods) > 0 && usageRatio > 1.0
+ rebalanceIgnored := len(unreadyPods) > 0 && usageRatio > 1.0

  if !rebalanceIgnored && len(missingPods) == 0 {
  if math.Abs(1.0-usageRatio) <= c.tolerance {

@@ -221,7 +223,7 @@ func (c *ReplicaCalculator) calcPlainMetricReplicas(metrics metricsclient.PodMet

  if rebalanceIgnored {
  // on a scale-up, treat unready pods as using 0% of the resource request
- for podName := range ignoredPods {
+ for podName := range unreadyPods {
  metrics[podName] = metricsclient.PodMetric{Value: 0}
  }
  }

@@ -366,16 +368,18 @@ func (c *ReplicaCalculator) GetExternalPerPodMetricReplicas(statusReplicas int32
  return replicaCount, utilization, timestamp, nil
  }

- func groupPods(pods []*v1.Pod, metrics metricsclient.PodMetricsInfo, resource v1.ResourceName, cpuInitializationPeriod, delayOfInitialReadinessStatus time.Duration) (readyPodCount int, ignoredPods sets.String, missingPods sets.String) {
+ func groupPods(pods []*v1.Pod, metrics metricsclient.PodMetricsInfo, resource v1.ResourceName, cpuInitializationPeriod, delayOfInitialReadinessStatus time.Duration) (readyPodCount int, unreadyPods, missingPods, ignoredPods sets.String) {
  missingPods = sets.NewString()
+ unreadyPods = sets.NewString()
  ignoredPods = sets.NewString()
  for _, pod := range pods {
  if pod.DeletionTimestamp != nil || pod.Status.Phase == v1.PodFailed {
  ignoredPods.Insert(pod.Name)
  continue
  }
  // Pending pods are ignored.
  if pod.Status.Phase == v1.PodPending {
- ignoredPods.Insert(pod.Name)
+ unreadyPods.Insert(pod.Name)
  continue
  }
  // Pods missing metrics.

@@ -386,22 +390,22 @@ func groupPods(pods []*v1.Pod, metrics metricsclient.PodMetricsInfo, resource v1
  }
  // Unready pods are ignored.
  if resource == v1.ResourceCPU {
- var ignorePod bool
+ var unready bool
  _, condition := podutil.GetPodCondition(&pod.Status, v1.PodReady)
  if condition == nil || pod.Status.StartTime == nil {
- ignorePod = true
+ unready = true
  } else {
  // Pod still within possible initialisation period.
  if pod.Status.StartTime.Add(cpuInitializationPeriod).After(time.Now()) {
  // Ignore sample if pod is unready or one window of metric wasn't collected since last state transition.
- ignorePod = condition.Status == v1.ConditionFalse || metric.Timestamp.Before(condition.LastTransitionTime.Time.Add(metric.Window))
+ unready = condition.Status == v1.ConditionFalse || metric.Timestamp.Before(condition.LastTransitionTime.Time.Add(metric.Window))
  } else {
  // Ignore metric if pod is unready and it has never been ready.
- ignorePod = condition.Status == v1.ConditionFalse && pod.Status.StartTime.Add(delayOfInitialReadinessStatus).After(condition.LastTransitionTime.Time)
+ unready = condition.Status == v1.ConditionFalse && pod.Status.StartTime.Add(delayOfInitialReadinessStatus).After(condition.LastTransitionTime.Time)
  }
  }
- if ignorePod {
- ignoredPods.Insert(pod.Name)
+ if unready {
+ unreadyPods.Insert(pod.Name)
  continue
  }
  }
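
The replica_calculator hunks split what used to be a single "ignored" bucket: failed or deleting pods stay ignored, while pending or not-yet-ready pods become "unready" and are counted as 0% usage on scale-up; pods without samples remain "missing". A simplified sketch of that classification (the real groupPods also inspects CPU initialization windows and readiness transition times; the pod struct and fields here are reduced to what the decision needs):

// Sketch: classify pods into ready / unready / missing / ignored.
package main

import "fmt"

type pod struct {
	name      string
	deleted   bool
	failed    bool
	pending   bool
	ready     bool
	hasMetric bool
}

func groupPods(pods []pod) (ready, unready, missing, ignored []string) {
	for _, p := range pods {
		switch {
		case p.deleted || p.failed:
			ignored = append(ignored, p.name) // never participates in the average
		case p.pending:
			unready = append(unready, p.name) // treated as 0% usage on scale-up
		case !p.hasMetric:
			missing = append(missing, p.name) // filled in conservatively later
		case !p.ready:
			unready = append(unready, p.name)
		default:
			ready = append(ready, p.name)
		}
	}
	return ready, unready, missing, ignored
}

func main() {
	ready, unready, missing, ignored := groupPods([]pod{
		{name: "a", ready: true, hasMetric: true},
		{name: "b", pending: true},
		{name: "c", ready: true}, // no metric sample yet
		{name: "d", failed: true},
	})
	fmt.Println(ready, unready, missing, ignored)
}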

vendor/k8s.io/kubernetes/pkg/controller/volume/expand/BUILD (generated, vendored; 4 changed lines)

@@ -7,7 +7,6 @@ go_library(
  srcs = ["expand_controller.go"],
  importpath = "k8s.io/kubernetes/pkg/controller/volume/expand",
  deps = [
- "//pkg/apis/core/v1/helper:go_default_library",
  "//pkg/controller/volume/events:go_default_library",
  "//pkg/volume:go_default_library",
  "//pkg/volume/csimigration:go_default_library",

@@ -22,12 +21,10 @@ go_library(
  "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
  "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
  "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
- "//staging/src/k8s.io/client-go/informers/storage/v1:go_default_library",
  "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
  "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
  "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
  "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
- "//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library",
  "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
  "//staging/src/k8s.io/client-go/tools/record:go_default_library",
  "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",

@@ -68,7 +65,6 @@ go_test(
  "//pkg/volume/util/operationexecutor:go_default_library",
  "//pkg/volume/util/types:go_default_library",
  "//staging/src/k8s.io/api/core/v1:go_default_library",
- "//staging/src/k8s.io/api/storage/v1:go_default_library",
  "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
  "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
  "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",

vendor/k8s.io/kubernetes/pkg/controller/volume/expand/expand_controller.go (generated, vendored; 36 changed lines)

@@ -32,18 +32,15 @@ import (
  "k8s.io/apimachinery/pkg/util/runtime"
  "k8s.io/apimachinery/pkg/util/wait"
  coreinformers "k8s.io/client-go/informers/core/v1"
- storageclassinformer "k8s.io/client-go/informers/storage/v1"
  clientset "k8s.io/client-go/kubernetes"
  "k8s.io/client-go/kubernetes/scheme"
  v1core "k8s.io/client-go/kubernetes/typed/core/v1"
  corelisters "k8s.io/client-go/listers/core/v1"
- storagelisters "k8s.io/client-go/listers/storage/v1"
  "k8s.io/client-go/tools/cache"
  kcache "k8s.io/client-go/tools/cache"
  "k8s.io/client-go/tools/record"
  "k8s.io/client-go/util/workqueue"
  cloudprovider "k8s.io/cloud-provider"
- v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
  "k8s.io/kubernetes/pkg/controller/volume/events"
  "k8s.io/kubernetes/pkg/volume"
  "k8s.io/kubernetes/pkg/volume/csimigration"

@@ -82,10 +79,6 @@ type expandController struct {
  pvLister corelisters.PersistentVolumeLister
  pvSynced kcache.InformerSynced

- // storageClass lister for fetching provisioner name
- classLister storagelisters.StorageClassLister
- classListerSynced cache.InformerSynced
-
  // cloud provider used by volume host
  cloud cloudprovider.Interface

@@ -109,7 +102,6 @@ func NewExpandController(
  kubeClient clientset.Interface,
  pvcInformer coreinformers.PersistentVolumeClaimInformer,
  pvInformer coreinformers.PersistentVolumeInformer,
- scInformer storageclassinformer.StorageClassInformer,
  cloud cloudprovider.Interface,
  plugins []volume.VolumePlugin,
  translator CSINameTranslator,

@@ -122,8 +114,6 @@ func NewExpandController(
  pvcsSynced: pvcInformer.Informer().HasSynced,
  pvLister: pvInformer.Lister(),
  pvSynced: pvInformer.Informer().HasSynced,
- classLister: scInformer.Lister(),
- classListerSynced: scInformer.Informer().HasSynced,
  queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "volume_expand"),
  translator: translator,
  csiMigratedPluginManager: csiMigratedPluginManager,

@@ -236,19 +226,6 @@ func (expc *expandController) syncHandler(key string) error {
  return err
  }

- claimClass := v1helper.GetPersistentVolumeClaimClass(pvc)
- if claimClass == "" {
- klog.V(4).Infof("volume expansion is disabled for PVC without StorageClasses: %s", util.ClaimToClaimKey(pvc))
- return nil
- }
-
- class, err := expc.classLister.Get(claimClass)
- if err != nil {
- klog.V(4).Infof("failed to expand PVC: %s with error: %v", util.ClaimToClaimKey(pvc), err)
- return nil
- }
-
- volumeResizerName := class.Provisioner
  volumeSpec := volume.NewSpecFromPersistentVolume(pv, false)
  migratable, err := expc.csiMigratedPluginManager.IsMigratable(volumeSpec)
  if err != nil {

@@ -257,9 +234,15 @@ func (expc *expandController) syncHandler(key string) error {
  }
  // handle CSI migration scenarios before invoking FindExpandablePluginBySpec for in-tree
  if migratable {
- msg := fmt.Sprintf("CSI migration enabled for %s; waiting for external resizer to expand the pvc", volumeResizerName)
+ inTreePluginName, err := expc.csiMigratedPluginManager.GetInTreePluginNameFromSpec(volumeSpec.PersistentVolume, volumeSpec.Volume)
+ if err != nil {
+ klog.V(4).Infof("Error getting in-tree plugin name from persistent volume %s: %v", volumeSpec.PersistentVolume.Name, err)
+ return err
+ }
+
+ msg := fmt.Sprintf("CSI migration enabled for %s; waiting for external resizer to expand the pvc", inTreePluginName)
  expc.recorder.Event(pvc, v1.EventTypeNormal, events.ExternalExpanding, msg)
- csiResizerName, err := expc.translator.GetCSINameFromInTreeName(class.Provisioner)
+ csiResizerName, err := expc.translator.GetCSINameFromInTreeName(inTreePluginName)
  if err != nil {
  errorMsg := fmt.Sprintf("error getting CSI driver name for pvc %s, with error %v", util.ClaimToClaimKey(pvc), err)
  expc.recorder.Event(pvc, v1.EventTypeWarning, events.ExternalExpanding, errorMsg)

@@ -290,6 +273,7 @@ func (expc *expandController) syncHandler(key string) error {
  return nil
  }

+ volumeResizerName := volumePlugin.GetPluginName()
  return expc.expand(pvc, pv, volumeResizerName)
  }

@@ -319,7 +303,7 @@ func (expc *expandController) Run(stopCh <-chan struct{}) {
  klog.Infof("Starting expand controller")
  defer klog.Infof("Shutting down expand controller")

- if !cache.WaitForNamedCacheSync("expand", stopCh, expc.pvcsSynced, expc.pvSynced, expc.classListerSynced) {
+ if !cache.WaitForNamedCacheSync("expand", stopCh, expc.pvcsSynced, expc.pvSynced) {
  return
  }

vendor/k8s.io/kubernetes/pkg/kubeapiserver/default_storage_factory_builder.go (generated, vendored; 9 changed lines)

@@ -49,6 +49,15 @@ var SpecialDefaultResourcePrefixes = map[schema.GroupResource]string{
  {Group: "policy", Resource: "podsecuritypolicies"}: "podsecuritypolicy",
  }

+ // DefaultWatchCacheSizes defines default resources for which watchcache
+ // should be disabled.
+ func DefaultWatchCacheSizes() map[schema.GroupResource]int {
+ return map[schema.GroupResource]int{
+ {Resource: "events"}: 0,
+ {Group: "events.k8s.io", Resource: "events"}: 0,
+ }
+ }
+
  // NewStorageFactoryConfig returns a new StorageFactoryConfig set up with necessary resource overrides.
  func NewStorageFactoryConfig() *StorageFactoryConfig {

vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_manager.go (generated, vendored; 27 changed lines)

@@ -505,19 +505,30 @@ func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *ku
  changes.CreateSandbox = false
  return changes
  }

+ // Get the containers to start, excluding the ones that succeeded if RestartPolicy is OnFailure.
+ var containersToStart []int
+ for idx, c := range pod.Spec.Containers {
+ if pod.Spec.RestartPolicy == v1.RestartPolicyOnFailure && containerSucceeded(&c, podStatus) {
+ continue
+ }
+ containersToStart = append(containersToStart, idx)
+ }
+ // We should not create a sandbox for a Pod if initialization is done and there is no container to start.
+ if len(containersToStart) == 0 {
+ _, _, done := findNextInitContainerToRun(pod, podStatus)
+ if done {
+ changes.CreateSandbox = false
+ return changes
+ }
+ }
+
  if len(pod.Spec.InitContainers) != 0 {
  // Pod has init containers, return the first one.
  changes.NextInitContainerToStart = &pod.Spec.InitContainers[0]
  return changes
  }
- // Start all containers by default but exclude the ones that succeeded if
- // RestartPolicy is OnFailure.
- for idx, c := range pod.Spec.Containers {
- if containerSucceeded(&c, podStatus) && pod.Spec.RestartPolicy == v1.RestartPolicyOnFailure {
- continue
- }
- changes.ContainersToStart = append(changes.ContainersToStart, idx)
- }
+ changes.ContainersToStart = containersToStart
  return changes
  }
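
The kubelet hunk above reorders the decision in computePodActions: the set of containers that still need to start is computed first, and if it is empty and init has finished, the kubelet no longer recreates a pod sandbox. A reduced sketch of that decision with invented types (spec, containersToStart, needSandbox), not the kubelet API:

// Sketch: only (re)create a sandbox when there is something left to run.
package main

import "fmt"

type spec struct {
	restartPolicyOnFailure bool
	containers             []string
}

func containersToStart(s spec, succeeded map[string]bool) []int {
	var idxs []int
	for i, name := range s.containers {
		if s.restartPolicyOnFailure && succeeded[name] {
			continue // already ran to completion; OnFailure will not restart it
		}
		idxs = append(idxs, i)
	}
	return idxs
}

func needSandbox(s spec, succeeded map[string]bool, initDone bool) bool {
	if len(containersToStart(s, succeeded)) == 0 && initDone {
		return false // nothing to run: do not churn a new sandbox
	}
	return true
}

func main() {
	s := spec{restartPolicyOnFailure: true, containers: []string{"job"}}
	fmt.Println(needSandbox(s, map[string]bool{"job": true}, true)) // false
	fmt.Println(needSandbox(s, map[string]bool{}, true))            // true
}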

vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go (generated, vendored; 7 changed lines)

@@ -1183,9 +1183,10 @@ func (proxier *Proxier) syncProxyRules() {
  allowFromNode := false
  for _, src := range svcInfo.LoadBalancerSourceRanges() {
  writeLine(proxier.natRules, append(args, "-s", src, "-j", string(chosenChain))...)
- // ignore error because it has been validated
- _, cidr, _ := net.ParseCIDR(src)
- if cidr.Contains(proxier.nodeIP) {
+ _, cidr, err := net.ParseCIDR(src)
+ if err != nil {
+ klog.Errorf("Error parsing %s CIDR in LoadBalancerSourceRanges, dropping: %v", cidr, err)
+ } else if cidr.Contains(proxier.nodeIP) {
  allowFromNode = true
  }
  }
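
The proxier hunk stops assuming LoadBalancerSourceRanges entries are pre-validated: a parse failure is now logged and the entry skipped instead of dereferencing a nil *net.IPNet. A small sketch of the safer handling, with an invented helper name and example ranges:

// Sketch: skip malformed source ranges rather than panic on cidr.Contains.
package main

import (
	"fmt"
	"net"
)

func allowsNode(sourceRanges []string, nodeIP net.IP) bool {
	for _, src := range sourceRanges {
		_, cidr, err := net.ParseCIDR(src)
		if err != nil {
			fmt.Printf("dropping unparseable source range %q: %v\n", src, err)
			continue
		}
		if cidr.Contains(nodeIP) {
			return true
		}
	}
	return false
}

func main() {
	node := net.ParseIP("10.0.0.5")
	fmt.Println(allowsNode([]string{"not-a-cidr", "10.0.0.0/24"}, node)) // true
}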

vendor/k8s.io/kubernetes/pkg/proxy/service.go (generated, vendored; 10 changed lines)

@@ -146,10 +146,14 @@ func (sct *ServiceChangeTracker) newBaseServiceInfo(port *v1.ServicePort, servic
  topologyKeys: service.Spec.TopologyKeys,
  }

+ loadBalancerSourceRanges := make([]string, len(service.Spec.LoadBalancerSourceRanges))
+ for i, sourceRange := range service.Spec.LoadBalancerSourceRanges {
+ loadBalancerSourceRanges[i] = strings.TrimSpace(sourceRange)
+ }
+
  if sct.isIPv6Mode == nil {
  info.externalIPs = make([]string, len(service.Spec.ExternalIPs))
- info.loadBalancerSourceRanges = make([]string, len(service.Spec.LoadBalancerSourceRanges))
- copy(info.loadBalancerSourceRanges, service.Spec.LoadBalancerSourceRanges)
+ info.loadBalancerSourceRanges = loadBalancerSourceRanges
  copy(info.externalIPs, service.Spec.ExternalIPs)
  // Deep-copy in case the service instance changes
  info.loadBalancerStatus = *service.Status.LoadBalancer.DeepCopy()

@@ -162,7 +166,7 @@ func (sct *ServiceChangeTracker) newBaseServiceInfo(port *v1.ServicePort, servic
  if len(incorrectIPs) > 0 {
  utilproxy.LogAndEmitIncorrectIPVersionEvent(sct.recorder, "externalIPs", strings.Join(incorrectIPs, ","), service.Namespace, service.Name, service.UID)
  }
- info.loadBalancerSourceRanges, incorrectIPs = utilproxy.FilterIncorrectCIDRVersion(service.Spec.LoadBalancerSourceRanges, *sct.isIPv6Mode)
+ info.loadBalancerSourceRanges, incorrectIPs = utilproxy.FilterIncorrectCIDRVersion(loadBalancerSourceRanges, *sct.isIPv6Mode)
  if len(incorrectIPs) > 0 {
  utilproxy.LogAndEmitIncorrectIPVersionEvent(sct.recorder, "loadBalancerSourceRanges", strings.Join(incorrectIPs, ","), service.Namespace, service.Name, service.UID)
  }

vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/BUILD (generated, vendored; 1 changed line)

@@ -67,6 +67,7 @@ go_test(
  "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
  "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
  "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+ "//staging/src/k8s.io/component-base/featuregate:go_default_library",
  "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
  "//vendor/github.com/stretchr/testify/assert:go_default_library",
  ],

@@ -285,7 +285,7 @@ func (pl *PodTopologySpread) Filter(ctx context.Context, cycleState *framework.C
  }

  // However, "empty" preFilterState is legit which tolerates every toSchedule Pod.
- if len(s.TpPairToMatchNum) == 0 || len(s.Constraints) == 0 {
+ if len(s.Constraints) == 0 {
  return nil
  }
6
vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/types.go
generated
vendored
6
vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/types.go
generated
vendored
@@ -391,8 +391,10 @@ func (r *Resource) SetMaxResource(rl v1.ResourceList) {
r.MilliCPU = cpu
}
case v1.ResourceEphemeralStorage:
if ephemeralStorage := rQuantity.Value(); ephemeralStorage > r.EphemeralStorage {
r.EphemeralStorage = ephemeralStorage
if utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
if ephemeralStorage := rQuantity.Value(); ephemeralStorage > r.EphemeralStorage {
r.EphemeralStorage = ephemeralStorage
}
}
default:
if v1helper.IsScalarResourceName(rName) {
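This hunk gates the ephemeral-storage maximum tracking behind the LocalStorageCapacityIsolation feature gate. A minimal sketch of the same guard shape, using a plain boolean constant in place of the real featuregate lookup (all names here are illustrative):

```go
package main

import "fmt"

// localStorageCapacityIsolationEnabled stands in for the real feature-gate
// check; the vendored code consults utilfeature.DefaultFeatureGate instead.
const localStorageCapacityIsolationEnabled = true

type Resource struct {
	MilliCPU         int64
	EphemeralStorage int64
}

// setMaxEphemeralStorage only records the larger value when the gate is on,
// mirroring the guarded block in the hunk above.
func (r *Resource) setMaxEphemeralStorage(v int64) {
	if localStorageCapacityIsolationEnabled {
		if v > r.EphemeralStorage {
			r.EphemeralStorage = v
		}
	}
}

func main() {
	r := &Resource{}
	r.setMaxEphemeralStorage(10 << 30) // 10 GiB, in bytes
	fmt.Println(r.EphemeralStorage)
}
```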
1 vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp/BUILD generated vendored
@@ -13,6 +13,7 @@ go_library(
deps = [
"//pkg/api/pod:go_default_library",
"//pkg/apis/core:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
],
)
10 vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp/strategy.go generated vendored
@@ -20,6 +20,7 @@ import (
"fmt"
"strings"

v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
podutil "k8s.io/kubernetes/pkg/api/pod"
api "k8s.io/kubernetes/pkg/apis/core"
@@ -67,6 +68,15 @@ func NewStrategy(pspAnnotations map[string]string) Strategy {
allowAnyProfile = true
continue
}
// With the graduation of seccomp to GA we automatically convert
// the deprecated seccomp profile annotation `docker/default` to
// `runtime/default`. This means that we now have to automatically
// allow `runtime/default` if a user specifies `docker/default` and
// vice versa in a PSP.
if p == v1.DeprecatedSeccompProfileDockerDefault || p == v1.SeccompProfileRuntimeDefault {
allowedProfiles[v1.SeccompProfileRuntimeDefault] = true
allowedProfiles[v1.DeprecatedSeccompProfileDockerDefault] = true
}
allowedProfiles[p] = true
}
}
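Because the GA seccomp support rewrites the deprecated `docker/default` annotation to `runtime/default`, a PSP that allows one of the two default profile names has to implicitly allow the other. A standalone sketch of that dual-allow mapping, using the literal profile name strings and a hypothetical helper:

```go
package main

import "fmt"

const (
	seccompProfileRuntimeDefault          = "runtime/default"
	deprecatedSeccompProfileDockerDefault = "docker/default"
)

// buildAllowedProfiles mirrors the hunk above: listing either of the two
// default profile names allows both, since one is rewritten to the other.
func buildAllowedProfiles(pspProfiles []string) map[string]bool {
	allowed := map[string]bool{}
	for _, p := range pspProfiles {
		if p == deprecatedSeccompProfileDockerDefault || p == seccompProfileRuntimeDefault {
			allowed[seccompProfileRuntimeDefault] = true
			allowed[deprecatedSeccompProfileDockerDefault] = true
		}
		allowed[p] = true
	}
	return allowed
}

func main() {
	allowed := buildAllowedProfiles([]string{"docker/default"})
	fmt.Println(allowed["runtime/default"], allowed["docker/default"]) // true true
}
```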
9 vendor/k8s.io/kubernetes/pkg/volume/azure_dd/attacher.go generated vendored
@@ -285,6 +285,15 @@ func (d *azureDiskDetacher) Detach(diskURI string, nodeName types.NodeName) erro

// UnmountDevice unmounts the volume on the node
func (d *azureDiskDetacher) UnmountDevice(deviceMountPath string) error {
if runtime.GOOS == "windows" {
// Flush data cache for windows because it does not do so automatically during unmount device
exec := d.plugin.host.GetExec(d.plugin.GetPluginName())
err := util.WriteVolumeCache(deviceMountPath, exec)
if err != nil {
return err
}
}

err := mount.CleanupMountPoint(deviceMountPath, d.plugin.host.GetMounter(d.plugin.GetPluginName()), false)
if err == nil {
klog.V(2).Infof("azureDisk - Device %s was unmounted", deviceMountPath)
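The hunk flushes the volume's write cache on Windows before cleaning up the mount point, since Windows does not flush it automatically during unmount. A sketch of that shape with hypothetical flush and cleanup helpers keyed on runtime.GOOS (not the real util/mount calls):

```go
package main

import (
	"fmt"
	"runtime"
)

// flushVolumeCache and cleanupMountPoint are hypothetical stand-ins for the
// util.WriteVolumeCache and mount.CleanupMountPoint calls in the hunk above.
func flushVolumeCache(deviceMountPath string) error {
	fmt.Println("flushing write cache for", deviceMountPath)
	return nil
}

func cleanupMountPoint(deviceMountPath string) error {
	fmt.Println("cleaning up mount point", deviceMountPath)
	return nil
}

// unmountDevice flushes the cache first on Windows, then unmounts.
func unmountDevice(deviceMountPath string) error {
	if runtime.GOOS == "windows" {
		if err := flushVolumeCache(deviceMountPath); err != nil {
			return err
		}
	}
	return cleanupMountPoint(deviceMountPath)
}

func main() {
	_ = unmountDevice(`C:\var\lib\kubelet\plugins\example\mounts\disk1`)
}
```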
48 vendor/k8s.io/legacy-cloud-providers/azure/azure_backoff.go generated vendored
@@ -20,6 +20,7 @@ package azure

import (
"net/http"
"regexp"
"strings"

"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
@@ -42,6 +43,12 @@ const (

// operationCanceledErrorMessage means the operation is canceled by another new operation.
operationCanceledErrorMessage = "canceledandsupersededduetoanotheroperation"

referencedResourceNotProvisionedMessageCode = "ReferencedResourceNotProvisioned"
)

var (
pipErrorMessageRE = regexp.MustCompile(`(?:.*)/subscriptions/(?:.*)/resourceGroups/(.*)/providers/Microsoft.Network/publicIPAddresses/([^\s]+)(?:.*)`)
)

// RequestBackoff if backoff is disabled in cloud provider it
@@ -180,7 +187,7 @@ func (az *Cloud) CreateOrUpdateLB(service *v1.Service, lb network.LoadBalancer)
defer cancel()

rgName := az.getLoadBalancerResourceGroup()
rerr := az.LoadBalancerClient.CreateOrUpdate(ctx, rgName, *lb.Name, lb, to.String(lb.Etag))
rerr := az.LoadBalancerClient.CreateOrUpdate(ctx, rgName, to.String(lb.Name), lb, to.String(lb.Etag))
klog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%s): end", *lb.Name)
if rerr == nil {
// Invalidate the cache right after updating
@@ -190,12 +197,39 @@ func (az *Cloud) CreateOrUpdateLB(service *v1.Service, lb network.LoadBalancer)

// Invalidate the cache because ETAG precondition mismatch.
if rerr.HTTPStatusCode == http.StatusPreconditionFailed {
klog.V(3).Infof("LoadBalancer cache for %s is cleanup because of http.StatusPreconditionFailed", *lb.Name)
klog.V(3).Infof("LoadBalancer cache for %s is cleanup because of http.StatusPreconditionFailed", to.String(lb.Name))
az.lbCache.Delete(*lb.Name)
}

retryErrorMessage := rerr.Error().Error()
// Invalidate the cache because another new operation has canceled the current request.
if strings.Contains(strings.ToLower(rerr.Error().Error()), operationCanceledErrorMessage) {
klog.V(3).Infof("LoadBalancer cache for %s is cleanup because CreateOrUpdate is canceled by another operation", *lb.Name)
if strings.Contains(strings.ToLower(retryErrorMessage), operationCanceledErrorMessage) {
klog.V(3).Infof("LoadBalancer cache for %s is cleanup because CreateOrUpdate is canceled by another operation", to.String(lb.Name))
az.lbCache.Delete(*lb.Name)
}

// The LB update may fail because the referenced PIP is not in the Succeeded provisioning state
if strings.Contains(strings.ToLower(retryErrorMessage), strings.ToLower(referencedResourceNotProvisionedMessageCode)) {
matches := pipErrorMessageRE.FindStringSubmatch(retryErrorMessage)
if len(matches) != 3 {
klog.Warningf("Failed to parse the retry error message %s", retryErrorMessage)
return rerr.Error()
}
pipRG, pipName := matches[1], matches[2]
klog.V(3).Infof("The public IP %s referenced by load balancer %s is not in Succeeded provisioning state, will try to update it", pipName, to.String(lb.Name))
pip, _, err := az.getPublicIPAddress(pipRG, pipName)
if err != nil {
klog.Warningf("Failed to get the public IP %s in resource group %s: %v", pipName, pipRG, err)
return rerr.Error()
}
// Perform a dummy update to fix the provisioning state
err = az.CreateOrUpdatePIP(service, pipRG, pip)
if err != nil {
klog.Warningf("Failed to update the public IP %s in resource group %s: %v", pipName, pipRG, err)
return rerr.Error()
}
// Invalidate the LB cache, return the error, and the controller manager
// would retry the LB update in the next reconcile loop
az.lbCache.Delete(*lb.Name)
}

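When the load balancer update fails with a ReferencedResourceNotProvisioned error, the hunk above pulls the public IP's resource group and name out of the error message with pipErrorMessageRE. A standalone sketch using the same pattern on a made-up error string (the real message comes from the Azure API):

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as pipErrorMessageRE in the hunk above.
var pipErrorMessageRE = regexp.MustCompile(`(?:.*)/subscriptions/(?:.*)/resourceGroups/(.*)/providers/Microsoft.Network/publicIPAddresses/([^\s]+)(?:.*)`)

func main() {
	// Illustrative error text only.
	msg := "Resource /subscriptions/sub-id/resourceGroups/demo-rg/providers/Microsoft.Network/publicIPAddresses/demo-pip is not in Succeeded state"
	matches := pipErrorMessageRE.FindStringSubmatch(msg)
	if len(matches) != 3 {
		fmt.Println("could not parse retry error message")
		return
	}
	pipRG, pipName := matches[1], matches[2]
	fmt.Println(pipRG, pipName) // demo-rg demo-pip
}
```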
@@ -239,10 +273,10 @@ func (az *Cloud) CreateOrUpdatePIP(service *v1.Service, pipResourceGroup string,
ctx, cancel := getContextWithCancel()
defer cancel()

rerr := az.PublicIPAddressesClient.CreateOrUpdate(ctx, pipResourceGroup, *pip.Name, pip)
klog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s, %s): end", pipResourceGroup, *pip.Name)
rerr := az.PublicIPAddressesClient.CreateOrUpdate(ctx, pipResourceGroup, to.String(pip.Name), pip)
klog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s, %s): end", pipResourceGroup, to.String(pip.Name))
if rerr != nil {
klog.Errorf("PublicIPAddressesClient.CreateOrUpdate(%s, %s) failed: %s", pipResourceGroup, *pip.Name, rerr.Error().Error())
klog.Errorf("PublicIPAddressesClient.CreateOrUpdate(%s, %s) failed: %s", pipResourceGroup, to.String(pip.Name), rerr.Error().Error())
az.Event(service, v1.EventTypeWarning, "CreateOrUpdatePublicIPAddress", rerr.Error().Error())
return rerr.Error()
}
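Several of these hunks swap direct dereferences such as *pip.Name for to.String(pip.Name), which returns an empty string for a nil pointer instead of panicking. A tiny sketch with a local helper analogous to the SDK's to.String (the helper and values here are illustrative):

```go
package main

import "fmt"

// toString is a local analogue of the Azure SDK helper to.String:
// it dereferences a *string but tolerates nil.
func toString(s *string) string {
	if s == nil {
		return ""
	}
	return *s
}

func main() {
	name := "demo-pip"
	var missing *string

	fmt.Printf("%q\n", toString(&name))   // "demo-pip"
	fmt.Printf("%q\n", toString(missing)) // "" instead of a nil-pointer panic
}
```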
20 vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go generated vendored
@@ -44,6 +44,9 @@ const (
maxStorageAccounts = 100 // max # is 200 (250 with special request). this allows 100 for everything else including stand alone disks
maxDisksPerStorageAccounts = 60
storageAccountUtilizationBeforeGrowing = 0.5
// Disk Caching is not supported for disks 4 TiB and larger
// https://docs.microsoft.com/en-us/azure/virtual-machines/premium-storage-performance#disk-caching
diskCachingLimit = 4096 // GiB

maxLUN = 64 // max number of LUNs per VM
errLeaseFailed = "AcquireDiskLeaseFailed"
@@ -156,10 +159,21 @@ func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI stri
return -1, danglingErr
}

if disk.DiskProperties != nil && disk.DiskProperties.Encryption != nil &&
disk.DiskProperties.Encryption.DiskEncryptionSetID != nil {
diskEncryptionSetID = *disk.DiskProperties.Encryption.DiskEncryptionSetID
if disk.DiskProperties != nil {
if disk.DiskProperties.DiskSizeGB != nil && *disk.DiskProperties.DiskSizeGB >= diskCachingLimit && cachingMode != compute.CachingTypesNone {
// Disk Caching is not supported for disks 4 TiB and larger
// https://docs.microsoft.com/en-us/azure/virtual-machines/premium-storage-performance#disk-caching
cachingMode = compute.CachingTypesNone
klog.Warningf("size of disk(%s) is %dGB which is bigger than limit(%dGB), set cacheMode as None",
diskURI, *disk.DiskProperties.DiskSizeGB, diskCachingLimit)
}

if disk.DiskProperties.Encryption != nil &&
disk.DiskProperties.Encryption.DiskEncryptionSetID != nil {
diskEncryptionSetID = *disk.DiskProperties.Encryption.DiskEncryptionSetID
}
}

if v, ok := disk.Tags[WriteAcceleratorEnabled]; ok {
if v != nil && strings.EqualFold(*v, "true") {
writeAcceleratorEnabled = true
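The hunk disables host caching when attaching managed disks of 4096 GiB or more, since Azure does not support disk caching at that size. A sketch of the same guard with a plain string caching mode in place of the compute.CachingTypes constants (names here are illustrative):

```go
package main

import "fmt"

const diskCachingLimitGiB = 4096 // caching unsupported for disks 4 TiB and larger

// normalizeCachingMode mirrors the guard in the hunk above: for large disks
// the requested caching mode is forced to "None".
func normalizeCachingMode(requested string, diskSizeGiB int32) string {
	if diskSizeGiB >= diskCachingLimitGiB && requested != "None" {
		fmt.Printf("disk size %dGiB >= %dGiB, forcing caching mode to None\n", diskSizeGiB, diskCachingLimitGiB)
		return "None"
	}
	return requested
}

func main() {
	fmt.Println(normalizeCachingMode("ReadOnly", 1024)) // ReadOnly
	fmt.Println(normalizeCachingMode("ReadOnly", 8192)) // None
}
```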
6 vendor/k8s.io/legacy-cloud-providers/gce/gce_instances.go generated vendored
@@ -498,7 +498,11 @@ func (g *Cloud) getInstancesByNames(names []string) ([]*gceInstance, error) {
return nil, err
}
if len(foundInstances) != len(names) {
return nil, cloudprovider.InstanceNotFound
if len(foundInstances) == 0 {
// return error so the TargetPool nodecount does not drop to 0 unexpectedly.
return nil, cloudprovider.InstanceNotFound
}
klog.Warningf("getFoundInstanceByNames - input instances %d, found %d. Continuing LoadBalancer Update", len(names), len(foundInstances))
}
return foundInstances, nil
}
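Here the lookup is relaxed so a partially successful result logs a warning and continues, and only an entirely empty result returns InstanceNotFound, which keeps the target pool's node count from collapsing to zero. A toy sketch of that behavior with hypothetical inputs:

```go
package main

import (
	"errors"
	"fmt"
)

var errInstanceNotFound = errors.New("instance not found")

// lookupInstances mirrors the relaxed behavior in the hunk above:
// a partial result is returned with a warning; only an empty result errors.
func lookupInstances(names []string, known map[string]bool) ([]string, error) {
	var found []string
	for _, n := range names {
		if known[n] {
			found = append(found, n)
		}
	}
	if len(found) != len(names) {
		if len(found) == 0 {
			return nil, errInstanceNotFound
		}
		fmt.Printf("input instances %d, found %d; continuing\n", len(names), len(found))
	}
	return found, nil
}

func main() {
	known := map[string]bool{"node-a": true, "node-b": true}
	fmt.Println(lookupInstances([]string{"node-a", "node-c"}, known))
	fmt.Println(lookupInstances([]string{"node-x"}, known))
}
```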
92 vendor/modules.txt vendored
@@ -1338,7 +1338,7 @@ gopkg.in/warnings.v0
gopkg.in/yaml.v2
# gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776
gopkg.in/yaml.v3
# k8s.io/api v0.19.0 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.19.3-k3s1
# k8s.io/api v0.19.0 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.19.4-k3s1
## explicit
k8s.io/api/admission/v1
k8s.io/api/admission/v1beta1
@@ -1384,7 +1384,7 @@ k8s.io/api/settings/v1alpha1
k8s.io/api/storage/v1
k8s.io/api/storage/v1alpha1
k8s.io/api/storage/v1beta1
# k8s.io/apiextensions-apiserver v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.19.3-k3s1
# k8s.io/apiextensions-apiserver v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.19.4-k3s1
k8s.io/apiextensions-apiserver/pkg/apihelpers
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install
@@ -1424,7 +1424,7 @@ k8s.io/apiextensions-apiserver/pkg/generated/openapi
k8s.io/apiextensions-apiserver/pkg/registry/customresource
k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor
k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition
# k8s.io/apimachinery v0.19.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.19.3-k3s1
# k8s.io/apimachinery v0.19.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.19.4-k3s1
## explicit
k8s.io/apimachinery/pkg/api/equality
k8s.io/apimachinery/pkg/api/errors
@@ -1488,7 +1488,7 @@ k8s.io/apimachinery/pkg/watch
k8s.io/apimachinery/third_party/forked/golang/json
k8s.io/apimachinery/third_party/forked/golang/netutil
k8s.io/apimachinery/third_party/forked/golang/reflect
# k8s.io/apiserver v0.19.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.19.3-k3s1
# k8s.io/apiserver v0.19.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.19.4-k3s1
## explicit
k8s.io/apiserver/pkg/admission
k8s.io/apiserver/pkg/admission/configuration
@@ -1616,7 +1616,7 @@ k8s.io/apiserver/plugin/pkg/audit/webhook
k8s.io/apiserver/plugin/pkg/authenticator/token/oidc
k8s.io/apiserver/plugin/pkg/authenticator/token/webhook
k8s.io/apiserver/plugin/pkg/authorizer/webhook
# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.19.3-k3s1
# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.19.4-k3s1
k8s.io/cli-runtime/pkg/genericclioptions
k8s.io/cli-runtime/pkg/kustomize
k8s.io/cli-runtime/pkg/kustomize/k8sdeps
@@ -1629,7 +1629,7 @@ k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch
k8s.io/cli-runtime/pkg/kustomize/k8sdeps/validator
k8s.io/cli-runtime/pkg/printers
k8s.io/cli-runtime/pkg/resource
# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.19.3-k3s1
# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.19.4-k3s1
## explicit
k8s.io/client-go/discovery
k8s.io/client-go/discovery/cached
@@ -1868,7 +1868,7 @@ k8s.io/client-go/util/jsonpath
k8s.io/client-go/util/keyutil
k8s.io/client-go/util/retry
k8s.io/client-go/util/workqueue
# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.19.3-k3s1
# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.19.4-k3s1
## explicit
k8s.io/cloud-provider
k8s.io/cloud-provider/api
@@ -1881,13 +1881,13 @@ k8s.io/cloud-provider/service/helpers
k8s.io/cloud-provider/volume
k8s.io/cloud-provider/volume/errors
k8s.io/cloud-provider/volume/helpers
# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.19.3-k3s1
# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.19.4-k3s1
k8s.io/cluster-bootstrap/token/api
k8s.io/cluster-bootstrap/token/jws
k8s.io/cluster-bootstrap/token/util
k8s.io/cluster-bootstrap/util/secrets
k8s.io/cluster-bootstrap/util/tokens
# k8s.io/code-generator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.19.3-k3s1
# k8s.io/code-generator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.19.4-k3s1
k8s.io/code-generator/cmd/client-gen/args
k8s.io/code-generator/cmd/client-gen/generators
k8s.io/code-generator/cmd/client-gen/generators/fake
@@ -1902,7 +1902,7 @@ k8s.io/code-generator/cmd/lister-gen/args
k8s.io/code-generator/cmd/lister-gen/generators
k8s.io/code-generator/pkg/namer
k8s.io/code-generator/pkg/util
# k8s.io/component-base v0.19.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.19.3-k3s1
# k8s.io/component-base v0.19.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.19.4-k3s1
## explicit
k8s.io/component-base/cli/flag
k8s.io/component-base/cli/globalflag
@@ -1926,11 +1926,11 @@ k8s.io/component-base/metrics/testutil
k8s.io/component-base/term
k8s.io/component-base/version
k8s.io/component-base/version/verflag
# k8s.io/cri-api v0.19.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.19.3-k3s1
# k8s.io/cri-api v0.19.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.19.4-k3s1
## explicit
k8s.io/cri-api/pkg/apis
k8s.io/cri-api/pkg/apis/runtime/v1alpha2
# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.19.3-k3s1
# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.19.4-k3s1
k8s.io/csi-translation-lib
k8s.io/csi-translation-lib/plugins
# k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14
@@ -1948,7 +1948,7 @@ k8s.io/heapster/metrics/api/v1/types
k8s.io/klog
# k8s.io/klog/v2 v2.2.0
k8s.io/klog/v2
# k8s.io/kube-aggregator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.19.3-k3s1
# k8s.io/kube-aggregator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.19.4-k3s1
k8s.io/kube-aggregator/pkg/apis/apiregistration
k8s.io/kube-aggregator/pkg/apis/apiregistration/install
k8s.io/kube-aggregator/pkg/apis/apiregistration/v1
@@ -1976,7 +1976,7 @@ k8s.io/kube-aggregator/pkg/controllers/status
k8s.io/kube-aggregator/pkg/registry/apiservice
k8s.io/kube-aggregator/pkg/registry/apiservice/etcd
k8s.io/kube-aggregator/pkg/registry/apiservice/rest
# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.19.3-k3s1
# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.19.4-k3s1
k8s.io/kube-controller-manager/config/v1alpha1
# k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6
k8s.io/kube-openapi/pkg/aggregator
@@ -1987,13 +1987,13 @@ k8s.io/kube-openapi/pkg/schemaconv
k8s.io/kube-openapi/pkg/util
k8s.io/kube-openapi/pkg/util/proto
k8s.io/kube-openapi/pkg/util/proto/validation
# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.19.3-k3s1
# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.19.4-k3s1
k8s.io/kube-proxy/config/v1alpha1
# k8s.io/kube-scheduler v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.19.3-k3s1
# k8s.io/kube-scheduler v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.19.4-k3s1
k8s.io/kube-scheduler/config/v1
k8s.io/kube-scheduler/config/v1beta1
k8s.io/kube-scheduler/extender/v1
# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.19.3-k3s1
# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.19.4-k3s1
k8s.io/kubectl/pkg/apps
k8s.io/kubectl/pkg/cmd
k8s.io/kubectl/pkg/cmd/annotate
@@ -2069,11 +2069,11 @@ k8s.io/kubectl/pkg/util/storage
k8s.io/kubectl/pkg/util/templates
k8s.io/kubectl/pkg/util/term
k8s.io/kubectl/pkg/validation
# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.19.3-k3s1
# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.19.4-k3s1
k8s.io/kubelet/config/v1beta1
k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1
k8s.io/kubelet/pkg/apis/pluginregistration/v1
# k8s.io/kubernetes v1.19.3 => github.com/rancher/kubernetes v1.19.3-k3s1
# k8s.io/kubernetes v1.19.4 => github.com/rancher/kubernetes v1.19.4-k3s1
## explicit
k8s.io/kubernetes/cmd/cloud-controller-manager/app
k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config
@@ -2812,7 +2812,7 @@ k8s.io/kubernetes/third_party/forked/gonum/graph
k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear
k8s.io/kubernetes/third_party/forked/gonum/graph/simple
k8s.io/kubernetes/third_party/forked/gonum/graph/traverse
# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.19.3-k3s1
# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.19.4-k3s1
k8s.io/legacy-cloud-providers/aws
k8s.io/legacy-cloud-providers/azure
k8s.io/legacy-cloud-providers/azure/auth
@@ -2854,7 +2854,7 @@ k8s.io/legacy-cloud-providers/openstack
k8s.io/legacy-cloud-providers/vsphere
k8s.io/legacy-cloud-providers/vsphere/vclib
k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers
# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.19.3-k3s1
# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.19.4-k3s1
k8s.io/metrics/pkg/apis/custom_metrics
k8s.io/metrics/pkg/apis/custom_metrics/v1beta1
k8s.io/metrics/pkg/apis/custom_metrics/v1beta2
@@ -2951,29 +2951,29 @@ vbom.ml/util/sortorder
# google.golang.org/genproto => google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63
# google.golang.org/grpc => google.golang.org/grpc v1.26.0
# gopkg.in/square/go-jose.v2 => gopkg.in/square/go-jose.v2 v2.2.2
# k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.19.3-k3s1
# k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.19.3-k3s1
# k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.19.3-k3s1
# k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.19.3-k3s1
# k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.19.3-k3s1
# k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.19.3-k3s1
# k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.19.3-k3s1
# k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.19.3-k3s1
# k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.19.3-k3s1
# k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.19.3-k3s1
# k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.19.3-k3s1
# k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.19.3-k3s1
# k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.19.3-k3s1
# k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.19.3-k3s1
# k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.19.3-k3s1
# k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.19.3-k3s1
# k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.19.3-k3s1
# k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.19.3-k3s1
# k8s.io/kubernetes => github.com/rancher/kubernetes v1.19.3-k3s1
# k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.19.3-k3s1
# k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.19.3-k3s1
# k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.19.3-k3s1
# k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.19.3-k3s1
# k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.19.3-k3s1
# k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.19.3-k3s1
# k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.19.4-k3s1
# k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.19.4-k3s1
# k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.19.4-k3s1
# k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.19.4-k3s1
# k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.19.4-k3s1
# k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.19.4-k3s1
# k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.19.4-k3s1
# k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.19.4-k3s1
# k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.19.4-k3s1
# k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.19.4-k3s1
# k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.19.4-k3s1
# k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.19.4-k3s1
# k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.19.4-k3s1
# k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.19.4-k3s1
# k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.19.4-k3s1
# k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.19.4-k3s1
# k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.19.4-k3s1
# k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.19.4-k3s1
# k8s.io/kubernetes => github.com/rancher/kubernetes v1.19.4-k3s1
# k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.19.4-k3s1
# k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.19.4-k3s1
# k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.19.4-k3s1
# k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.19.4-k3s1
# k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.19.4-k3s1
# k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.19.4-k3s1
# mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34