Mirror of https://github.com/k3s-io/k3s.git, synced 2024-06-07 19:41:36 +00:00
Update to k8s 1.18.2
parent 46c7aba40a
commit 3986b4787f

go.mod (51 lines changed)
@@ -32,36 +32,37 @@ replace (
 	github.com/prometheus/client_model => github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910
 	github.com/prometheus/common => github.com/prometheus/common v0.0.0-20181126121408-4724e9255275
 	github.com/prometheus/procfs => github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a
-	k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.0-k3s.1
-	k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.0-k3s.1
-	k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.0-k3s.1
-	k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.0-k3s.1
-	k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.0-k3s.1
-	k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.0-k3s.1
-	k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.0-k3s.1
-	k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.0-k3s.1
-	k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.0-k3s.1
-	k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.0-k3s.1
-	k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.0-k3s.1
-	k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.0-k3s.1
-	k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.0-k3s.1
-	k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.0-k3s.1
-	k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.0-k3s.1
-	k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.0-k3s.1
-	k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.0-k3s.1
-	k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.0-k3s.1
-	k8s.io/kubernetes => github.com/rancher/kubernetes v1.18.0-k3s.1
-	k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.0-k3s.1
-	k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.0-k3s.1
-	k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.18.0-k3s.1
-	k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.0-k3s.1
-	k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.18.0-k3s.1
-	k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.18.0-k3s.1
+	k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.2-k3s.1
+	k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.2-k3s.1
+	k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.2-k3s.1
+	k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.2-k3s.1
+	k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.2-k3s.1
+	k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.2-k3s.1
+	k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.2-k3s.1
+	k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.2-k3s.1
+	k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.2-k3s.1
+	k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.2-k3s.1
+	k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.2-k3s.1
+	k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.2-k3s.1
+	k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.2-k3s.1
+	k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.2-k3s.1
+	k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.2-k3s.1
+	k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.2-k3s.1
+	k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.2-k3s.1
+	k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.2-k3s.1
+	k8s.io/kubernetes => github.com/rancher/kubernetes v1.18.2-k3s.1
+	k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.2-k3s.1
+	k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.2-k3s.1
+	k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.18.2-k3s.1
+	k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.2-k3s.1
+	k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.18.2-k3s.1
+	k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.18.2-k3s.1
 	mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34
 )

 require (
+	github.com/NYTimes/gziphandler v1.1.1 // indirect
 	github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
 	github.com/bhendo/go-powershell v0.0.0-20190719160123-219e7fb4e41e // indirect
 	github.com/bronze1man/goStrongswanVici v0.0.0-20190828090544-27d02f80ba40 // indirect
 	github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23 // indirect
go.sum (95 lines changed)
@@ -171,17 +171,8 @@ github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY=
 github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
-github.com/docker/distribution v0.0.0-20190205005809-0d3efadf0154 h1:C8WBRZDiZn3IZnBlbHVeTWF32XhVGK69Li4GC/3jL9Q=
-github.com/docker/distribution v0.0.0-20190205005809-0d3efadf0154/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v1.4.2-0.20191205034852-d163fbba3c82 h1:kf0UYG15U+KsO7mA8Lsz+OkuoC1Ldc+cTQUyUnAFmg8=
-github.com/docker/docker v1.4.2-0.20191205034852-d163fbba3c82/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v1.4.2-0.20200309214505-aa6a9891b09c h1:zviRyz1SWO8+WVJbi9/jlJCkrsZ54r/lTRbgtcaQhLs=
 github.com/docker/docker v1.4.2-0.20200309214505-aa6a9891b09c/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v17.12.0-ce-rc1.0.20190219214528-cbe11bdc6da8+incompatible h1:ZZvtagdntE3LlHuNFoj0r5nVFllfu6Mn413hOu01cZI=
-github.com/docker/docker v17.12.0-ce-rc1.0.20190219214528-cbe11bdc6da8+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v17.12.0-ce-rc1.0.20190327010347-be7ac8be2ae0+incompatible h1:OGKd8UInXYNzUc+SgqizkHsPoIYJp7+rc5+HcYrWfqs=
-github.com/docker/docker v17.12.0-ce-rc1.0.20190327010347-be7ac8be2ae0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v17.12.0-ce-rc1.0.20200309214505-aa6a9891b09c+incompatible h1:G2hY8RD7jB9QaSmcb8mYEIg8QbEvVAB7se8+lXHZHfg=
 github.com/docker/docker v17.12.0-ce-rc1.0.20200309214505-aa6a9891b09c+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/go-connections v0.3.0 h1:3lOnM9cSzgGwx8VfK/NGOW5fLQ0GjIlCkaktF+n1M6o=
 github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
 github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8=
@@ -651,49 +642,49 @@ github.com/rancher/helm-controller v0.4.2-0.20200326195131-eb51d4fa9d8d h1:6w5gC
 github.com/rancher/helm-controller v0.4.2-0.20200326195131-eb51d4fa9d8d/go.mod h1:3jCGmvjp3bFnbeuHL4HiODje9ZYJ/ujUBNtXHFXrwlM=
 github.com/rancher/kine v0.3.5 h1:Tm4eOtejpnzs1WFBrXj76lCLvX9czLlTkgqUk9luCQk=
 github.com/rancher/kine v0.3.5/go.mod h1:xEMl0tLCva9/9me7mXJ3m9Vo6yqHgC4OU3NiK4CPrGQ=
-github.com/rancher/kubernetes v1.18.0-k3s.1 h1:Y7Fcf/ZV7AHdP+so+wCpehgFRzOf1ktmxROHHeE31NM=
-github.com/rancher/kubernetes v1.18.0-k3s.1/go.mod h1:z8xjOOO1Ljz+TaHpOxVGC7cxtF32TesIamoQ+BZrVS0=
-github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.0-k3s.1 h1:uaM2A6HPVnWfsogKBXhGMiywVJqHsMXUN1GD8GjtIzk=
-github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.0-k3s.1/go.mod h1:oMzWB6/RPBLYAObltLVSu5Ms1ZztBe7G8s1ni2rZY7w=
-github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.0-k3s.1 h1:tsFKRasTyx/8eGUJT4AHTSpU+R66sygxE32iO4X3Wac=
-github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.0-k3s.1/go.mod h1:tMuEHO85+WtdJsLBJ1U4bh7oB23v/D4vP0BtL39qxM4=
-github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.0-k3s.1 h1:aXAKJ4/d6TvVlHUhNpY47gp5ubdguvFVVDEzj1Hq950=
-github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.0-k3s.1/go.mod h1:3Y3wDqxL/YFcgzyFD7r80Z6lMxpRJb/V+dds+b7A2NE=
-github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.0-k3s.1 h1:Fu6s637Iw7hIVj6Ef4VWcRwk6wmKQtiEHU4m+7/XlaE=
-github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.0-k3s.1/go.mod h1:SeOQ7d1nUCULR978oKuSossKyGzova3DlaXEa1zJ1ns=
-github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.0-k3s.1 h1:70mqPbeJsMiUPKMZlJXnL8VYosp1mZzyF93i/JyI3SU=
-github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.0-k3s.1/go.mod h1:M8WtUx89NzNZ4Qx/1diDW/1TSs2Pv9J6//dIYvvtwSs=
-github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.0-k3s.1 h1:XhwWYcqrSiiXBP4QDpFzgGkHq2q/Dl5A1VtHwbANKp8=
-github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.0-k3s.1/go.mod h1:Ck7kQmlFASfY0SaqYH1NwUrxeuAipkIbnuHi642eQ+I=
-github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.0-k3s.1 h1:nPRZNeftoghb2pV3bcsolTFP3evTRZMQklF/YRDCISA=
-github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.0-k3s.1/go.mod h1:jW0IWD1v1cNcp/vvXbVuovmZNSieKSZBdM7VmX1lrVI=
-github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.0-k3s.1 h1:ZqQZ5XerkIPIvC/3hTGOF3XqSersKX54O34yi2OoBmo=
-github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.0-k3s.1/go.mod h1:oHXhD/NqW/vlYggpTUWbP2x6disww69H1jdsyirbJl8=
-github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.0-k3s.1 h1:M2NISXAe3HvZcUruHklylkydgHC/hbf2vdxtI2rkRuM=
-github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.0-k3s.1/go.mod h1:Yai6SRJt/nb3VvQw4jKKZBtXRJF/OrswWmfjyF6FqP0=
-github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.0-k3s.1 h1:SZ8pLnwgr+oxeRgA667SQQKH0Mw1PPDM6CrLWsPRJ94=
-github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.0-k3s.1/go.mod h1:zRlCznOsLYdwq5DB2b/26X/n/04fhV3U3rMC60t80/Q=
-github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.0-k3s.1 h1:E2BrilrYS2EKNM0s/kFfsx5ctr/JykR/hg2dW1uOtO4=
-github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.0-k3s.1/go.mod h1:O3AtmT8iqccYwp/fsXdy3h0N9X/yfvRMD2XS48PJrBk=
-github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.0-k3s.1 h1:kbd7kgA+cJZf0Lhs1hovIv8XI80Enu7wbBJVhhfoJe8=
-github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.0-k3s.1/go.mod h1:BBW+zYPd9dNyl1qZ3U/coU05IW6AvRAbo3s86WKDfzU=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.0-k3s.1 h1:en5dUqAPsA4ghQYPcnezSpFjKe0e69USg7tpI2UBneo=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.0-k3s.1/go.mod h1:EY3DrCVVj6X1xeVtHF/0lht5TZK9YhKLyfe6QBD3QvE=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.0-k3s.1 h1:Cx8p6l6S4RK/rTB9grfYX+ak/0P8ZU4snAy1xzk2i7A=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.0-k3s.1/go.mod h1:pABoR/v0r2aJLFC1570FaaRJbXyiHhqdGHe5W8nk0XY=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.0-k3s.1 h1:+VacWcjl0AaLuqKrUVkovafr4I5asGSikVIseolYZ0A=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.0-k3s.1/go.mod h1:GLAmLACy/nOND24DRGKyPH21F89pTcevjPRxEtbLJmU=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.0-k3s.1 h1:iwdlTwf5arobBKyALjKVB/noHMLXn7bOCUBEOVUCnnQ=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.0-k3s.1/go.mod h1:UNQ/Ff/Mq9mmCl0MYGl3ciCEIRQr9BT+/DSsoy6/ZMI=
-github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.0-k3s.1 h1:hRsAjoerQPwcUpL8oLyIfDLcDx9qeIyfU1FDYGKKXBE=
-github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.0-k3s.1/go.mod h1:lLLodYzjtaBEMebmdtYz2Sh+X8/3DB79imNypXbONsE=
-github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.0-k3s.1 h1:T25ls3BHPhyijRk3HU1fW0dhoMnT8FqPXyHaLRgfyV4=
-github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.0-k3s.1/go.mod h1:Raj75cxSm9NiVBoLk/lB1D4XvpBzTG4WoJ6nIH8Cyew=
-github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.0-k3s.1 h1:ByqIZLUsHLI8qF+RlfXoqSrNB20yL5E5CmIOx2tgWr4=
-github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.0-k3s.1/go.mod h1:R6lK1g14jiec20OVuA1ArvsCKs5th4rxGL3eUMdQmyA=
-github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.0-k3s.1 h1:s3MkW8ax5xFeJlMRQjtwDFeP/+On0xJ3CxcJk+8ty9s=
-github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.0-k3s.1/go.mod h1:xZM9EdJpWjqIWPvLiCP7vYKUEMwIgc0S8nc/MlLVK3Y=
-github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.0-k3s.1/go.mod h1:gpiIUEAyQvSEXKbsH2taOEzkrHXvoZwHuArWgR+DpG8=
+github.com/rancher/kubernetes v1.18.2-k3s.1 h1:LhWNObWF7dL/+T57LkYpuRKtsCBpt0P5G6dRVFG+Ncs=
+github.com/rancher/kubernetes v1.18.2-k3s.1/go.mod h1:z8xjOOO1Ljz+TaHpOxVGC7cxtF32TesIamoQ+BZrVS0=
+github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.2-k3s.1 h1:tYDY9g8+xLwUcsG9T6Xg7cBkO/vgU6yv7cQKqUN6NDE=
+github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.2-k3s.1/go.mod h1:oMzWB6/RPBLYAObltLVSu5Ms1ZztBe7G8s1ni2rZY7w=
+github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.2-k3s.1 h1:mjOCIsG8WfV2LROU8xLVGEZ9G5uWrD/xHNc87rLPss8=
+github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.2-k3s.1/go.mod h1:tMuEHO85+WtdJsLBJ1U4bh7oB23v/D4vP0BtL39qxM4=
+github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.2-k3s.1 h1:w2RnTwBNOi1QHYFoXbFLXx3Gaw3pPbplxbUPpl76hjw=
+github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.2-k3s.1/go.mod h1:3Y3wDqxL/YFcgzyFD7r80Z6lMxpRJb/V+dds+b7A2NE=
+github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.2-k3s.1 h1:/6mV1HGv/QoAjFCPlSZfkdeJagmT8gYwiaOsXxjTZEM=
+github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.2-k3s.1/go.mod h1:SeOQ7d1nUCULR978oKuSossKyGzova3DlaXEa1zJ1ns=
+github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.2-k3s.1 h1:SlXTYNBxaWbxSPyHNRQ/epxqixUiokY04Wh+8gBYTXA=
+github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.2-k3s.1/go.mod h1:M8WtUx89NzNZ4Qx/1diDW/1TSs2Pv9J6//dIYvvtwSs=
+github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.2-k3s.1 h1:qCJO8jfGrmJk7Wn8jfqekOua5PizO/joSQUB89vxLB0=
+github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.2-k3s.1/go.mod h1:Ck7kQmlFASfY0SaqYH1NwUrxeuAipkIbnuHi642eQ+I=
+github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.2-k3s.1 h1:PmV2L98GjPbD+0UvMR//4I8DiEraMOEl7fq65OGd4hI=
+github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.2-k3s.1/go.mod h1:jW0IWD1v1cNcp/vvXbVuovmZNSieKSZBdM7VmX1lrVI=
+github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.2-k3s.1 h1:xTdvOPd4qckfxaqE0vYTdcVhFlYFN7bcS7xg1bnq9Y4=
+github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.2-k3s.1/go.mod h1:oHXhD/NqW/vlYggpTUWbP2x6disww69H1jdsyirbJl8=
+github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.2-k3s.1 h1:UMkQrPLLHpAH+jKNtMBIZw1i2wSuNSgxu7G48WLsoi0=
+github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.2-k3s.1/go.mod h1:Yai6SRJt/nb3VvQw4jKKZBtXRJF/OrswWmfjyF6FqP0=
+github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.2-k3s.1 h1:kZyprzrCOdYg1GdB4OPOu7ie2Zyw9ocO6Fa3iG2BhKc=
+github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.2-k3s.1/go.mod h1:zRlCznOsLYdwq5DB2b/26X/n/04fhV3U3rMC60t80/Q=
+github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.2-k3s.1 h1:ULoh4AB2JiSHA3ELUD56zRh7cnL6/bU8I6AaBbRI/xo=
+github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.2-k3s.1/go.mod h1:O3AtmT8iqccYwp/fsXdy3h0N9X/yfvRMD2XS48PJrBk=
+github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.2-k3s.1 h1:1rhSNnADx+2NMudlqoFC1cBjPLblQ0sZeKkiWIUvJZY=
+github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.2-k3s.1/go.mod h1:BBW+zYPd9dNyl1qZ3U/coU05IW6AvRAbo3s86WKDfzU=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.2-k3s.1 h1:j0DihywFZbNqzlEE9UikIXoynvLumJFZNHcPrYTr63E=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.2-k3s.1/go.mod h1:EY3DrCVVj6X1xeVtHF/0lht5TZK9YhKLyfe6QBD3QvE=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.2-k3s.1 h1:FEyiGSFRKYejw8aRPbOTfIVukL0DkwhgdfmN36zQrBo=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.2-k3s.1/go.mod h1:pABoR/v0r2aJLFC1570FaaRJbXyiHhqdGHe5W8nk0XY=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.2-k3s.1 h1:fHJ7O2jLBQhDnw7ahecdtmx1l3mJ25fwWtlq3cOPrxw=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.2-k3s.1/go.mod h1:GLAmLACy/nOND24DRGKyPH21F89pTcevjPRxEtbLJmU=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.2-k3s.1 h1:7ZGcqlwBSyLMLVT9r7F9jHRc+dhnlumrHYfblZilkl4=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.2-k3s.1/go.mod h1:UNQ/Ff/Mq9mmCl0MYGl3ciCEIRQr9BT+/DSsoy6/ZMI=
+github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.2-k3s.1 h1:oNlYS9R/bsVnlUIeXrbJAxvNPlqhqksJZAoA4eneAdc=
+github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.2-k3s.1/go.mod h1:lLLodYzjtaBEMebmdtYz2Sh+X8/3DB79imNypXbONsE=
+github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.2-k3s.1 h1:Fl7NvvCzMDsZWYIh2F3MzQB+EPl7Xh0TTFTAw6SZNbo=
+github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.2-k3s.1/go.mod h1:Raj75cxSm9NiVBoLk/lB1D4XvpBzTG4WoJ6nIH8Cyew=
+github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.2-k3s.1 h1:nldhxCsspFtJPzus/aeLcednyDvAesVgu/XIE5Qa6/8=
+github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.2-k3s.1/go.mod h1:R6lK1g14jiec20OVuA1ArvsCKs5th4rxGL3eUMdQmyA=
+github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.2-k3s.1 h1:ha8xCCbv3iPsXg+TjB+ZHHxxRyuiWWB9bgTDkgHmLCk=
+github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.2-k3s.1/go.mod h1:xZM9EdJpWjqIWPvLiCP7vYKUEMwIgc0S8nc/MlLVK3Y=
+github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.2-k3s.1/go.mod h1:gpiIUEAyQvSEXKbsH2taOEzkrHXvoZwHuArWgR+DpG8=
 github.com/rancher/moq v0.0.0-20190404221404-ee5226d43009/go.mod h1:wpITyDPTi/Na5h73XkbuEf2AP9fbgrIGqqxVzFhYD6U=
 github.com/rancher/remotedialer v0.2.0 h1:xD7t3K6JYwTdAsxmGtTHQMkEkFgKouQ1foLxVW424Dc=
 github.com/rancher/remotedialer v0.2.0/go.mod h1:tkU8ZvrR5lRgaKWaX71nAy6daeqvPFx/lJEnbW7tXSI=
vendor/k8s.io/apimachinery/pkg/util/net/http.go (6 lines changed, generated, vendored)
@@ -55,6 +55,12 @@ func JoinPreservingTrailingSlash(elem ...string) string {
 	return result
 }
 
+// IsTimeout returns true if the given error is a network timeout error
+func IsTimeout(err error) bool {
+	neterr, ok := err.(net.Error)
+	return ok && neterr != nil && neterr.Timeout()
+}
+
 // IsProbableEOF returns true if the given error resembles a connection termination
 // scenario that would justify assuming that the watch is empty.
 // These errors are what the Go http stack returns back to us which are general
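The new IsTimeout helper only delegates to the standard net.Error interface. A minimal, hypothetical sketch of how a caller could use it to separate retryable timeouts from hard failures; the target URL is a placeholder and not part of this commit:

	package main

	import (
		"fmt"
		"net/http"

		utilnet "k8s.io/apimachinery/pkg/util/net"
	)

	func main() {
		// http.Get returns a *url.Error, which implements net.Error,
		// so utilnet.IsTimeout can inspect its Timeout() flag.
		_, err := http.Get("https://example.invalid/healthz")
		switch {
		case err == nil:
			fmt.Println("ok")
		case utilnet.IsTimeout(err):
			fmt.Println("network timeout; safe to retry")
		default:
			fmt.Println("hard failure:", err)
		}
	}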
vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go (2 lines changed, generated, vendored)
@@ -113,7 +113,7 @@ func (sw *StreamWatcher) receive() {
 		case io.ErrUnexpectedEOF:
 			klog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err)
 		default:
-			if net.IsProbableEOF(err) {
+			if net.IsProbableEOF(err) || net.IsTimeout(err) {
 				klog.V(5).Infof("Unable to decode an event from the watch stream: %v", err)
 			} else {
 				sw.result <- Event{
vendor/k8s.io/client-go/pkg/version/base.go (6 lines changed, generated, vendored)
@@ -3,8 +3,8 @@ package version
 var (
 	gitMajor = "1"
 	gitMinor = "18"
-	gitVersion = "v1.18.0-k3s.1"
-	gitCommit = "f6f006d81c73f96c2405fab723f98a3bcc07b89c"
+	gitVersion = "v1.18.2-k3s.1"
+	gitCommit = "3d7d34a23ec464c08b81486aeca0b7d1bb6e044c"
 	gitTreeState = "clean"
-	buildDate = "2020-03-26T19:55:03Z"
+	buildDate = "2020-04-19T05:33:19Z"
 )
vendor/k8s.io/client-go/rest/request.go (2 lines changed, generated, vendored)
@@ -655,7 +655,7 @@ func (r *Request) Watch(ctx context.Context) (watch.Interface, error) {
 	if err != nil {
 		// The watch stream mechanism handles many common partial data errors, so closed
 		// connections can be retried in many cases.
-		if net.IsProbableEOF(err) {
+		if net.IsProbableEOF(err) || net.IsTimeout(err) {
 			return watch.NewEmptyWatch(), nil
 		}
 		return nil, err
	}
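With this change a timed-out watch request yields watch.NewEmptyWatch() instead of an error. An empty watch's result channel is already closed, which consumers treat as a cleanly terminated watch to be re-established. A small sketch of that contract, not code from this commit:

	package main

	import (
		"fmt"

		"k8s.io/apimachinery/pkg/watch"
	)

	func main() {
		w := watch.NewEmptyWatch()
		// Receiving from the closed channel reports ok == false, which
		// reflector-style consumers interpret as "re-list and re-watch".
		_, ok := <-w.ResultChan()
		fmt.Println(ok) // false
		w.Stop()
	}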
vendor/k8s.io/client-go/tools/cache/reflector.go (7 lines changed, generated, vendored)
@@ -364,6 +364,8 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
 			AllowWatchBookmarks: true,
 		}
 
+		// start the clock before sending the request, since some proxies won't flush headers until after the first watch event is sent
+		start := r.clock.Now()
 		w, err := r.listerWatcher.Watch(options)
 		if err != nil {
 			switch {
@@ -390,7 +392,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
 			return nil
 		}
 
-		if err := r.watchHandler(w, &resourceVersion, resyncerrc, stopCh); err != nil {
+		if err := r.watchHandler(start, w, &resourceVersion, resyncerrc, stopCh); err != nil {
 			if err != errorStopRequested {
 				switch {
 				case isExpiredError(err):
@@ -417,8 +419,7 @@ func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) err
 }
 
 // watchHandler watches w and keeps *resourceVersion up to date.
-func (r *Reflector) watchHandler(w watch.Interface, resourceVersion *string, errc chan error, stopCh <-chan struct{}) error {
-	start := r.clock.Now()
+func (r *Reflector) watchHandler(start time.Time, w watch.Interface, resourceVersion *string, errc chan error, stopCh <-chan struct{}) error {
 	eventCount := 0
 
 	// Stopping the watcher should be idempotent and if we return from this function there's no way
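Threading start into watchHandler matters because the handler compares elapsed time against a short threshold to spot watches that die immediately; sampling the clock before the request now also counts connection setup, which helps behind proxies that hold back response headers. A simplified sketch of the check, with hypothetical helper names and assuming a one-second threshold like the reflector's:

	package main

	import (
		"errors"
		"time"
	)

	// watchOnce is a hypothetical stand-in for one ListAndWatch iteration.
	func watchOnce(startWatch func() error, consume func() int) error {
		start := time.Now() // sampled before the request is sent, as in the patched Reflector
		if err := startWatch(); err != nil {
			return err
		}
		eventCount := consume()
		// A watch that closes within about a second without delivering any
		// events usually indicates a broken connection rather than no data.
		if time.Since(start) < time.Second && eventCount == 0 {
			return errors.New("very short watch; retrying")
		}
		return nil
	}

	func main() {
		_ = watchOnce(func() error { return nil }, func() int { return 0 })
	}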
vendor/k8s.io/client-go/tools/watch/retrywatcher.go (2 lines changed, generated, vendored)
@@ -120,7 +120,7 @@ func (rw *RetryWatcher) doReceive() (bool, time.Duration) {
 
 	default:
 		msg := "Watch failed: %v"
-		if net.IsProbableEOF(err) {
+		if net.IsProbableEOF(err) || net.IsTimeout(err) {
 			klog.V(5).Infof(msg, err)
 			// Retry
 			return false, 0
vendor/k8s.io/component-base/version/base.go (6 lines changed, generated, vendored)
@@ -3,8 +3,8 @@ package version
 var (
 	gitMajor = "1"
 	gitMinor = "18"
-	gitVersion = "v1.18.0-k3s.1"
-	gitCommit = "f6f006d81c73f96c2405fab723f98a3bcc07b89c"
+	gitVersion = "v1.18.2-k3s.1"
+	gitCommit = "3d7d34a23ec464c08b81486aeca0b7d1bb6e044c"
 	gitTreeState = "clean"
-	buildDate = "2020-03-26T19:55:03Z"
+	buildDate = "2020-04-19T05:33:19Z"
 )
vendor/k8s.io/kubectl/pkg/cmd/apply/BUILD (1 line changed, generated, vendored)
@@ -22,6 +22,7 @@ go_library(
 		"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
 		"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
 		"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
+		"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
 		"//staging/src/k8s.io/apimachinery/pkg/util/jsonmergepatch:go_default_library",
 		"//staging/src/k8s.io/apimachinery/pkg/util/mergepatch:go_default_library",
 		"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
vendor/k8s.io/kubectl/pkg/cmd/apply/apply.go (306 lines changed, generated, vendored)
@@ -28,6 +28,7 @@ import (
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/cli-runtime/pkg/genericclioptions"
 	"k8s.io/cli-runtime/pkg/printers"
@@ -315,13 +316,14 @@ func isIncompatibleServerError(err error) bool {
 	return err.(*errors.StatusError).Status().Code == http.StatusUnsupportedMediaType
 }
 
-// GetObjects returns a (possibly cached) version of all the objects to apply
-// as a slice of pointer to resource.Info. The resource.Info contains the object
-// and some other denormalized data. This function should not be called until
-// AFTER the "complete" and "validate" methods have been called to ensure that
-// the ApplyOptions is filled in and valid. Returns an error if the resource
-// builder returns an error retrieving the objects.
+// GetObjects returns a (possibly cached) version of all the valid objects to apply
+// as a slice of pointer to resource.Info and an error if one or more occurred.
+// IMPORTANT: This function can return both valid objects AND an error, since
+// "ContinueOnError" is set on the builder. This function should not be called
+// until AFTER the "complete" and "validate" methods have been called to ensure that
+// the ApplyOptions is filled in and valid.
 func (o *ApplyOptions) GetObjects() ([]*resource.Info, error) {
+	var err error = nil
 	if !o.objectsCached {
 		// include the uninitialized objects by default if --prune is true
 		// unless explicitly set --include-uninitialized=false
@@ -334,17 +336,10 @@
 			LabelSelectorParam(o.Selector).
 			Flatten().
 			Do()
-		if err := r.Err(); err != nil {
-			return nil, err
-		}
-		infos, err := r.Infos()
-		if err != nil {
-			return nil, err
-		}
-		o.objects = infos
+		o.objects, err = r.Infos()
 		o.objectsCached = true
 	}
-	return o.objects, nil
+	return o.objects, err
 }
 
 // SetObjects stores the set of objects (as resource.Info) to be
@@ -371,53 +366,78 @@ func (o *ApplyOptions) Run() error {

// Generates the objects using the resource builder if they have not
// already been stored by calling "SetObjects()" in the pre-processor.
errs := []error{}
infos, err := o.GetObjects()
if err != nil {
return err
errs = append(errs, err)
}
if len(infos) == 0 {
if len(infos) == 0 && len(errs) == 0 {
return fmt.Errorf("no objects passed to apply")
}
// Iterate through all objects, applying each one.
for _, info := range infos {
if err := o.applyOneObject(info); err != nil {
errs = append(errs, err)
}
}
// If any errors occurred during apply, then return error (or
// aggregate of errors).
if len(errs) == 1 {
return errs[0]
}
if len(errs) > 1 {
return utilerrors.NewAggregate(errs)
}

o.MarkNamespaceVisited(info)
if o.PostProcessorFn != nil {
klog.V(4).Infof("Running apply post-processor function")
if err := o.PostProcessorFn(); err != nil {
return err
}
}

if err := o.Recorder.Record(info.Object); err != nil {
klog.V(4).Infof("error recording current command: %v", err)
return nil
}

func (o *ApplyOptions) applyOneObject(info *resource.Info) error {
o.MarkNamespaceVisited(info)

if err := o.Recorder.Record(info.Object); err != nil {
klog.V(4).Infof("error recording current command: %v", err)
}

if o.ServerSideApply {
// Send the full object to be applied on the server side.
data, err := runtime.Encode(unstructured.UnstructuredJSONScheme, info.Object)
if err != nil {
return cmdutil.AddSourceToErr("serverside-apply", info.Source, err)
}

if o.ServerSideApply {
// Send the full object to be applied on the server side.
data, err := runtime.Encode(unstructured.UnstructuredJSONScheme, info.Object)
if err != nil {
return cmdutil.AddSourceToErr("serverside-apply", info.Source, err)
}
options := metav1.PatchOptions{
Force: &o.ForceConflicts,
FieldManager: o.FieldManager,
}

options := metav1.PatchOptions{
Force: &o.ForceConflicts,
FieldManager: o.FieldManager,
helper := resource.NewHelper(info.Client, info.Mapping)
if o.DryRunStrategy == cmdutil.DryRunServer {
if err := o.DryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err != nil {
return err
}

helper := resource.NewHelper(info.Client, info.Mapping)
if o.DryRunStrategy == cmdutil.DryRunServer {
if err := o.DryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err != nil {
return err
}
helper.DryRun(true)
helper.DryRun(true)
}
obj, err := helper.Patch(
info.Namespace,
info.Name,
types.ApplyPatchType,
data,
&options,
)
if err != nil {
if isIncompatibleServerError(err) {
err = fmt.Errorf("Server-side apply not available on the server: (%v)", err)
}
obj, err := helper.Patch(
info.Namespace,
info.Name,
types.ApplyPatchType,
data,
&options,
)
if err != nil {
if isIncompatibleServerError(err) {
err = fmt.Errorf("Server-side apply not available on the server: (%v)", err)
}
if errors.IsConflict(err) {
err = fmt.Errorf(`%v
if errors.IsConflict(err) {
err = fmt.Errorf(`%v
Please review the fields above--they currently have other managers. Here
are the ways you can resolve this warning:
* If you intend to manage all of these fields, please re-run the apply
|
||||
value; in this case, you'll become the manager if the other manager(s)
|
||||
stop managing the field (remove it from their configuration).
|
||||
See http://k8s.io/docs/reference/using-api/api-concepts/#conflicts`, err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
info.Refresh(obj, true)
|
||||
|
||||
if err := o.MarkObjectVisited(info); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if o.shouldPrintObject() {
|
||||
continue
|
||||
}
|
||||
|
||||
printer, err := o.ToPrinter("serverside-applied")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = printer.PrintObj(info.Object, o.Out); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
return err
|
||||
}
|
||||
|
||||
// Get the modified configuration of the object. Embed the result
|
||||
// as an annotation in the modified configuration, so that it will appear
|
||||
// in the patch sent to the server.
|
||||
modified, err := util.GetModifiedConfiguration(info.Object, true, unstructured.UnstructuredJSONScheme)
|
||||
info.Refresh(obj, true)
|
||||
|
||||
if err := o.MarkObjectVisited(info); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if o.shouldPrintObject() {
|
||||
return nil
|
||||
}
|
||||
|
||||
printer, err := o.ToPrinter("serverside-applied")
|
||||
if err != nil {
|
||||
return cmdutil.AddSourceToErr(fmt.Sprintf("retrieving modified configuration from:\n%s\nfor:", info.String()), info.Source, err)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := info.Get(); err != nil {
|
||||
if !errors.IsNotFound(err) {
|
||||
return cmdutil.AddSourceToErr(fmt.Sprintf("retrieving current configuration of:\n%s\nfrom server for:", info.String()), info.Source, err)
|
||||
}
|
||||
if err = printer.PrintObj(info.Object, o.Out); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Create the resource if it doesn't exist
|
||||
// First, update the annotation used by kubectl apply
|
||||
if err := util.CreateApplyAnnotation(info.Object, unstructured.UnstructuredJSONScheme); err != nil {
|
||||
return cmdutil.AddSourceToErr("creating", info.Source, err)
|
||||
}
|
||||
// Get the modified configuration of the object. Embed the result
|
||||
// as an annotation in the modified configuration, so that it will appear
|
||||
// in the patch sent to the server.
|
||||
modified, err := util.GetModifiedConfiguration(info.Object, true, unstructured.UnstructuredJSONScheme)
|
||||
if err != nil {
|
||||
return cmdutil.AddSourceToErr(fmt.Sprintf("retrieving modified configuration from:\n%s\nfor:", info.String()), info.Source, err)
|
||||
}
|
||||
|
||||
if o.DryRunStrategy != cmdutil.DryRunClient {
|
||||
// Then create the resource and skip the three-way merge
|
||||
helper := resource.NewHelper(info.Client, info.Mapping)
|
||||
if o.DryRunStrategy == cmdutil.DryRunServer {
|
||||
if err := o.DryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err != nil {
|
||||
return cmdutil.AddSourceToErr("creating", info.Source, err)
|
||||
}
|
||||
helper.DryRun(true)
|
||||
}
|
||||
obj, err := helper.Create(info.Namespace, true, info.Object)
|
||||
if err != nil {
|
||||
if err := info.Get(); err != nil {
|
||||
if !errors.IsNotFound(err) {
|
||||
return cmdutil.AddSourceToErr(fmt.Sprintf("retrieving current configuration of:\n%s\nfrom server for:", info.String()), info.Source, err)
|
||||
}
|
||||
|
||||
// Create the resource if it doesn't exist
|
||||
// First, update the annotation used by kubectl apply
|
||||
if err := util.CreateApplyAnnotation(info.Object, unstructured.UnstructuredJSONScheme); err != nil {
|
||||
return cmdutil.AddSourceToErr("creating", info.Source, err)
|
||||
}
|
||||
|
||||
if o.DryRunStrategy != cmdutil.DryRunClient {
|
||||
// Then create the resource and skip the three-way merge
|
||||
helper := resource.NewHelper(info.Client, info.Mapping)
|
||||
if o.DryRunStrategy == cmdutil.DryRunServer {
|
||||
if err := o.DryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err != nil {
|
||||
return cmdutil.AddSourceToErr("creating", info.Source, err)
|
||||
}
|
||||
info.Refresh(obj, true)
|
||||
helper.DryRun(true)
|
||||
}
|
||||
|
||||
if err := o.MarkObjectVisited(info); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if o.shouldPrintObject() {
|
||||
continue
|
||||
}
|
||||
|
||||
printer, err := o.ToPrinter("created")
|
||||
obj, err := helper.Create(info.Namespace, true, info.Object)
|
||||
if err != nil {
|
||||
return err
|
||||
return cmdutil.AddSourceToErr("creating", info.Source, err)
|
||||
}
|
||||
if err = printer.PrintObj(info.Object, o.Out); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
info.Refresh(obj, true)
|
||||
}
|
||||
|
||||
if err := o.MarkObjectVisited(info); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if o.DryRunStrategy != cmdutil.DryRunClient {
|
||||
metadata, _ := meta.Accessor(info.Object)
|
||||
annotationMap := metadata.GetAnnotations()
|
||||
if _, ok := annotationMap[corev1.LastAppliedConfigAnnotation]; !ok {
|
||||
fmt.Fprintf(o.ErrOut, warningNoLastAppliedConfigAnnotation, o.cmdBaseName)
|
||||
}
|
||||
|
||||
patcher, err := newPatcher(o, info)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
patchBytes, patchedObject, err := patcher.Patch(info.Object, modified, info.Source, info.Namespace, info.Name, o.ErrOut)
|
||||
if err != nil {
|
||||
return cmdutil.AddSourceToErr(fmt.Sprintf("applying patch:\n%s\nto:\n%v\nfor:", patchBytes, info), info.Source, err)
|
||||
}
|
||||
|
||||
info.Refresh(patchedObject, true)
|
||||
|
||||
if string(patchBytes) == "{}" && !o.shouldPrintObject() {
|
||||
printer, err := o.ToPrinter("unchanged")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = printer.PrintObj(info.Object, o.Out); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if o.shouldPrintObject() {
|
||||
continue
|
||||
return nil
|
||||
}
|
||||
|
||||
printer, err := o.ToPrinter("configured")
|
||||
printer, err := o.ToPrinter("created")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = printer.PrintObj(info.Object, o.Out); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if o.PostProcessorFn != nil {
|
||||
klog.V(4).Infof("Running apply post-processor function")
|
||||
if err := o.PostProcessorFn(); err != nil {
|
||||
if err := o.MarkObjectVisited(info); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if o.DryRunStrategy != cmdutil.DryRunClient {
|
||||
metadata, _ := meta.Accessor(info.Object)
|
||||
annotationMap := metadata.GetAnnotations()
|
||||
if _, ok := annotationMap[corev1.LastAppliedConfigAnnotation]; !ok {
|
||||
fmt.Fprintf(o.ErrOut, warningNoLastAppliedConfigAnnotation, o.cmdBaseName)
|
||||
}
|
||||
|
||||
patcher, err := newPatcher(o, info)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
patchBytes, patchedObject, err := patcher.Patch(info.Object, modified, info.Source, info.Namespace, info.Name, o.ErrOut)
|
||||
if err != nil {
|
||||
return cmdutil.AddSourceToErr(fmt.Sprintf("applying patch:\n%s\nto:\n%v\nfor:", patchBytes, info), info.Source, err)
|
||||
}
|
||||
|
||||
info.Refresh(patchedObject, true)
|
||||
|
||||
if string(patchBytes) == "{}" && !o.shouldPrintObject() {
|
||||
printer, err := o.ToPrinter("unchanged")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = printer.PrintObj(info.Object, o.Out); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
if o.shouldPrintObject() {
|
||||
return nil
|
||||
}
|
||||
|
||||
printer, err := o.ToPrinter("configured")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = printer.PrintObj(info.Object, o.Out); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
|
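The restructured Run() records per-object failures and keeps going, then folds them into a single error; GetObjects likewise returns partial results together with an error because ContinueOnError is set on the builder. A compact sketch of that aggregation pattern, where applyOne stands in for o.applyOneObject:

	package main

	import (
		"fmt"

		utilerrors "k8s.io/apimachinery/pkg/util/errors"
	)

	func applyAll(items []string, applyOne func(string) error) error {
		errs := []error{}
		for _, item := range items {
			if err := applyOne(item); err != nil {
				errs = append(errs, err) // record the failure and continue
			}
		}
		if len(errs) == 1 {
			return errs[0]
		}
		if len(errs) > 1 {
			return utilerrors.NewAggregate(errs)
		}
		return nil
	}

	func main() {
		err := applyAll([]string{"a", "b"}, func(s string) error {
			return fmt.Errorf("failed to apply %q", s)
		})
		fmt.Println(err) // an aggregate containing both failures
	}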
vendor/k8s.io/kubectl/pkg/cmd/apply/patcher.go (15 lines changed, generated, vendored)
@@ -80,16 +80,9 @@ func newPatcher(o *ApplyOptions, info *resource.Info) (*Patcher, error) {
 		openapiSchema = o.OpenAPISchema
 	}
 
-	helper := resource.NewHelper(info.Client, info.Mapping)
-	if o.DryRunStrategy == cmdutil.DryRunServer {
-		if err := o.DryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err != nil {
-			return nil, err
-		}
-		helper.DryRun(true)
-	}
 	return &Patcher{
 		Mapping: info.Mapping,
-		Helper: helper,
+		Helper: resource.NewHelper(info.Client, info.Mapping),
 		DynamicClient: o.DynamicClient,
 		Overwrite: o.Overwrite,
 		BackOff: clockwork.NewRealClock(),
@@ -185,7 +178,7 @@ func (p *Patcher) patchSimple(obj runtime.Object, modified []byte, source, names
 		}
 	}
 
-	patchedObj, err := p.Helper.Patch(namespace, name, patchType, patch, nil)
+	patchedObj, err := p.Helper.DryRun(p.ServerDryRun).Patch(namespace, name, patchType, patch, nil)
 	return patch, patchedObj, err
 }
 
@@ -230,11 +223,11 @@ func (p *Patcher) deleteAndCreate(original runtime.Object, modified []byte, name
 	if err != nil {
 		return modified, nil, err
 	}
-	createdObject, err := p.Helper.Create(namespace, true, versionedObject)
+	createdObject, err := p.Helper.DryRun(p.ServerDryRun).Create(namespace, true, versionedObject)
 	if err != nil {
 		// restore the original object if we fail to create the new one
 		// but still propagate and advertise error to user
-		recreated, recreateErr := p.Helper.Create(namespace, true, original)
+		recreated, recreateErr := p.Helper.DryRun(p.ServerDryRun).Create(namespace, true, original)
 		if recreateErr != nil {
 			err = fmt.Errorf("An error occurred force-replacing the existing object with the newly provided one:\n\n%v.\n\nAdditionally, an error occurred attempting to restore the original object:\n\n%v", err, recreateErr)
 		} else {
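The Patcher no longer bakes dry-run into its Helper at construction; each call site opts in via p.Helper.DryRun(p.ServerDryRun). A generic sketch of such a chainable toggle, using a toy type rather than the real resource.Helper:

	package main

	import "fmt"

	type helper struct{ serverDryRun bool }

	// DryRun flips server-side dry-run on or off and returns the helper,
	// so the decision is visible at every call site: h.DryRun(true).Create(...).
	func (h *helper) DryRun(dry bool) *helper {
		h.serverDryRun = dry
		return h
	}

	func (h *helper) Create(obj string) error {
		if h.serverDryRun {
			fmt.Println("dry-run create:", obj) // nothing would be persisted
			return nil
		}
		fmt.Println("create:", obj)
		return nil
	}

	func main() {
		h := &helper{}
		_ = h.DryRun(true).Create("example")
	}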
vendor/k8s.io/kubectl/pkg/cmd/apply/prune.go (3 lines changed, generated, vendored)
@@ -77,9 +77,6 @@ func (p *pruner) pruneAll(o *ApplyOptions) error {
 	}
 
 	for n := range p.visitedNamespaces {
-		if len(o.Namespace) != 0 && n != o.Namespace {
-			continue
-		}
 		for _, m := range namespacedRESTMappings {
 			if err := p.prune(n, m); err != nil {
 				return fmt.Errorf("error pruning namespaced object %v: %v", m.GroupVersionKind, err)
vendor/k8s.io/kubectl/pkg/cmd/diff/diff.go (10 lines changed, generated, vendored)
@@ -312,6 +312,7 @@ func (obj InfoObject) Live() runtime.Object {
 // Returns the "merged" object, as it would look like if applied or
 // created.
 func (obj InfoObject) Merged() (runtime.Object, error) {
+	helper := resource.NewHelper(obj.Info.Client, obj.Info.Mapping).DryRun(true)
 	if obj.ServerSideApply {
 		data, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj.LocalObj)
 		if err != nil {
@@ -320,9 +321,8 @@ func (obj InfoObject) Merged() (runtime.Object, error) {
 		options := metav1.PatchOptions{
 			Force: &obj.ForceConflicts,
 			FieldManager: obj.FieldManager,
-			DryRun: []string{metav1.DryRunAll},
 		}
-		return resource.NewHelper(obj.Info.Client, obj.Info.Mapping).Patch(
+		return helper.Patch(
 			obj.Info.Namespace,
 			obj.Info.Name,
 			types.ApplyPatchType,
@@ -334,11 +334,11 @@ func (obj InfoObject) Merged() (runtime.Object, error) {
 	// Build the patcher, and then apply the patch with dry-run, unless the object doesn't exist, in which case we need to create it.
 	if obj.Live() == nil {
 		// Dry-run create if the object doesn't exist.
-		return resource.NewHelper(obj.Info.Client, obj.Info.Mapping).CreateWithOptions(
+		return helper.CreateWithOptions(
 			obj.Info.Namespace,
 			true,
 			obj.LocalObj,
-			&metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}},
+			&metav1.CreateOptions{},
 		)
 	}
 
@@ -361,7 +361,7 @@ func (obj InfoObject) Merged() (runtime.Object, error) {
 	// We plan on replacing this with server-side apply when it becomes available.
 	patcher := &apply.Patcher{
 		Mapping: obj.Info.Mapping,
-		Helper: resource.NewHelper(obj.Info.Client, obj.Info.Mapping),
+		Helper: helper,
 		Overwrite: true,
 		BackOff: clockwork.NewRealClock(),
 		ServerDryRun: true,
vendor/k8s.io/kubernetes/pkg/apis/autoscaling/BUILD (1 line changed, generated, vendored)
@@ -10,6 +10,7 @@ go_library(
 	srcs = [
 		"annotations.go",
 		"doc.go",
+		"helpers.go",
 		"register.go",
 		"types.go",
 		"zz_generated.deepcopy.go",
vendor/k8s.io/kubernetes/pkg/apis/autoscaling/helpers.go (58 lines added, generated, vendored, new file)
@@ -0,0 +1,58 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package autoscaling
+
+// DropRoundTripHorizontalPodAutoscalerAnnotations removes any annotations used to serialize round-tripped fields from later API versions,
+// and returns false if no changes were made and the original input object was returned.
+// It should always be called when converting internal -> external versions, prior
+// to setting any of the custom annotations:
+//
+//     annotations, copiedAnnotations := DropRoundTripHorizontalPodAutoscalerAnnotations(externalObj.Annotations)
+//     externalObj.Annotations = annotations
+//
+//     if internal.SomeField != nil {
+//         if !copiedAnnotations {
+//             externalObj.Annotations = DeepCopyStringMap(externalObj.Annotations)
+//             copiedAnnotations = true
+//         }
+//         externalObj.Annotations[...] = json.Marshal(...)
+//     }
+func DropRoundTripHorizontalPodAutoscalerAnnotations(in map[string]string) (out map[string]string, copied bool) {
+	_, hasMetricsSpecs := in[MetricSpecsAnnotation]
+	_, hasBehaviorSpecs := in[BehaviorSpecsAnnotation]
+	_, hasMetricsStatuses := in[MetricStatusesAnnotation]
+	_, hasConditions := in[HorizontalPodAutoscalerConditionsAnnotation]
+	if hasMetricsSpecs || hasBehaviorSpecs || hasMetricsStatuses || hasConditions {
+		out = DeepCopyStringMap(in)
+		delete(out, MetricSpecsAnnotation)
+		delete(out, BehaviorSpecsAnnotation)
+		delete(out, MetricStatusesAnnotation)
+		delete(out, HorizontalPodAutoscalerConditionsAnnotation)
+		return out, true
+	}
+	return in, false
+}
+
+// DeepCopyStringMap returns a copy of the input map.
+// If input is nil, an empty map is returned.
+func DeepCopyStringMap(in map[string]string) map[string]string {
+	out := make(map[string]string, len(in))
+	for k, v := range in {
+		out[k] = v
+	}
+	return out
+}
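The helper pair above implements a copy-on-write discipline for annotation maps: the input is only copied when a round-trip key is actually present. A small usage sketch against the vendored package; the annotation payload is illustrative:

	package main

	import (
		"fmt"

		"k8s.io/kubernetes/pkg/apis/autoscaling"
	)

	func main() {
		in := map[string]string{
			"keep-me":                         "x",
			autoscaling.MetricSpecsAnnotation: "[]", // illustrative payload
		}
		out, copied := autoscaling.DropRoundTripHorizontalPodAutoscalerAnnotations(in)
		fmt.Println(copied)         // true: a round-trip key was present, so out is a fresh copy
		fmt.Println(out["keep-me"]) // "x"; the original input map is left untouched
	}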
vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/BUILD (3 lines changed, generated, vendored)
@@ -30,10 +30,13 @@ go_test(
 	embed = [":go_default_library"],
 	deps = [
 		"//pkg/api/legacyscheme:go_default_library",
 		"//pkg/apis/autoscaling:go_default_library",
+		"//pkg/apis/autoscaling/install:go_default_library",
+		"//pkg/apis/core/install:go_default_library",
 		"//staging/src/k8s.io/api/autoscaling/v1:go_default_library",
 		"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
 		"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
 		"//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
+		"//vendor/k8s.io/utils/pointer:go_default_library",
 	],
 )
vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/conversion.go (98 lines changed, generated, vendored)
@@ -260,6 +260,10 @@ func Convert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(i
 		return err
 	}
 
+	// clear any pre-existing round-trip annotations to make sure the only ones set are ones we produced during conversion
+	annotations, copiedAnnotations := autoscaling.DropRoundTripHorizontalPodAutoscalerAnnotations(out.Annotations)
+	out.Annotations = annotations
+
 	otherMetrics := make([]autoscalingv1.MetricSpec, 0, len(in.Spec.Metrics))
 	for _, metric := range in.Spec.Metrics {
 		if metric.Type == autoscaling.ResourceMetricSourceType && metric.Resource != nil && metric.Resource.Name == core.ResourceCPU && metric.Resource.Target.AverageUtilization != nil {
@@ -289,19 +293,16 @@
 		}
 	}
 
-	if len(otherMetrics) > 0 || len(in.Status.CurrentMetrics) > 0 || len(currentConditions) > 0 || in.Spec.Behavior != nil {
-		old := out.Annotations
-		out.Annotations = make(map[string]string, len(old)+4)
-		for k, v := range old {
-			out.Annotations[k] = v
-		}
-	}
-
 	if len(otherMetrics) > 0 {
 		otherMetricsEnc, err := json.Marshal(otherMetrics)
 		if err != nil {
 			return err
 		}
+		// copy before mutating
+		if !copiedAnnotations {
+			copiedAnnotations = true
+			out.Annotations = autoscaling.DeepCopyStringMap(out.Annotations)
+		}
 		out.Annotations[autoscaling.MetricSpecsAnnotation] = string(otherMetricsEnc)
 	}
@@ -310,14 +311,25 @@
 		if err != nil {
 			return err
 		}
+		// copy before mutating
+		if !copiedAnnotations {
+			copiedAnnotations = true
+			out.Annotations = autoscaling.DeepCopyStringMap(out.Annotations)
+		}
 		out.Annotations[autoscaling.MetricStatusesAnnotation] = string(currentMetricsEnc)
 	}
 
 	if in.Spec.Behavior != nil {
+		// TODO: this is marshaling an internal type. Fix this without breaking backwards compatibility.
 		behaviorEnc, err := json.Marshal(in.Spec.Behavior)
 		if err != nil {
 			return err
 		}
+		// copy before mutating
+		if !copiedAnnotations {
+			copiedAnnotations = true
+			out.Annotations = autoscaling.DeepCopyStringMap(out.Annotations)
+		}
 		out.Annotations[autoscaling.BehaviorSpecsAnnotation] = string(behaviorEnc)
 	}
@@ -326,6 +338,11 @@
 		if err != nil {
 			return err
 		}
+		// copy before mutating
+		if !copiedAnnotations {
+			copiedAnnotations = true
+			out.Annotations = autoscaling.DeepCopyStringMap(out.Annotations)
+		}
 		out.Annotations[autoscaling.HorizontalPodAutoscalerConditionsAnnotation] = string(currentConditionsEnc)
 	}
@@ -339,47 +356,40 @@ func Convert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(i
 
 	if otherMetricsEnc, hasOtherMetrics := out.Annotations[autoscaling.MetricSpecsAnnotation]; hasOtherMetrics {
 		var otherMetrics []autoscalingv1.MetricSpec
-		if err := json.Unmarshal([]byte(otherMetricsEnc), &otherMetrics); err != nil {
-			return err
-		}
-
-		// the normal Spec conversion could have populated out.Spec.Metrics with a single element, so deal with that
-		outMetrics := make([]autoscaling.MetricSpec, len(otherMetrics)+len(out.Spec.Metrics))
-		for i, metric := range otherMetrics {
-			if err := Convert_v1_MetricSpec_To_autoscaling_MetricSpec(&metric, &outMetrics[i], s); err != nil {
-				return err
-			}
-		}
-		if out.Spec.Metrics != nil {
-			outMetrics[len(otherMetrics)] = out.Spec.Metrics[0]
-		}
-		out.Spec.Metrics = outMetrics
+		if err := json.Unmarshal([]byte(otherMetricsEnc), &otherMetrics); err == nil {
+			// the normal Spec conversion could have populated out.Spec.Metrics with a single element, so deal with that
+			outMetrics := make([]autoscaling.MetricSpec, len(otherMetrics)+len(out.Spec.Metrics))
+			for i, metric := range otherMetrics {
+				if err := Convert_v1_MetricSpec_To_autoscaling_MetricSpec(&metric, &outMetrics[i], s); err != nil {
+					return err
+				}
+			}
+			if out.Spec.Metrics != nil {
+				outMetrics[len(otherMetrics)] = out.Spec.Metrics[0]
+			}
+			out.Spec.Metrics = outMetrics
+		}
 		delete(out.Annotations, autoscaling.MetricSpecsAnnotation)
 	}
 
 	if behaviorEnc, hasConstraints := out.Annotations[autoscaling.BehaviorSpecsAnnotation]; hasConstraints {
 		// TODO: this is unmarshaling an internal type. Fix this without breaking backwards compatibility.
 		var behavior autoscaling.HorizontalPodAutoscalerBehavior
-		if err := json.Unmarshal([]byte(behaviorEnc), &behavior); err != nil {
-			return err
-		}
-		out.Spec.Behavior = &behavior
+		if err := json.Unmarshal([]byte(behaviorEnc), &behavior); err == nil && behavior != (autoscaling.HorizontalPodAutoscalerBehavior{}) {
+			out.Spec.Behavior = &behavior
+		}
 		delete(out.Annotations, autoscaling.BehaviorSpecsAnnotation)
 	}
 
 	if currentMetricsEnc, hasCurrentMetrics := out.Annotations[autoscaling.MetricStatusesAnnotation]; hasCurrentMetrics {
 		// ignore any existing status values -- the ones here have more information
 		var currentMetrics []autoscalingv1.MetricStatus
-		if err := json.Unmarshal([]byte(currentMetricsEnc), &currentMetrics); err != nil {
-			return err
-		}
-
-		out.Status.CurrentMetrics = make([]autoscaling.MetricStatus, len(currentMetrics))
-		for i, currentMetric := range currentMetrics {
-			if err := Convert_v1_MetricStatus_To_autoscaling_MetricStatus(&currentMetric, &out.Status.CurrentMetrics[i], s); err != nil {
-				return err
-			}
-		}
+		if err := json.Unmarshal([]byte(currentMetricsEnc), &currentMetrics); err == nil {
+			out.Status.CurrentMetrics = make([]autoscaling.MetricStatus, len(currentMetrics))
+			for i, currentMetric := range currentMetrics {
+				if err := Convert_v1_MetricStatus_To_autoscaling_MetricStatus(&currentMetric, &out.Status.CurrentMetrics[i], s); err != nil {
+					return err
+				}
+			}
+		}
 		delete(out.Annotations, autoscaling.MetricStatusesAnnotation)
 	}
 
 	// autoscaling/v1 formerly had an implicit default applied in the controller. In v2beta1, we apply it explicitly.
@@ -403,19 +413,19 @@ func Convert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(i
 
 	if currentConditionsEnc, hasCurrentConditions := out.Annotations[autoscaling.HorizontalPodAutoscalerConditionsAnnotation]; hasCurrentConditions {
 		var currentConditions []autoscalingv1.HorizontalPodAutoscalerCondition
-		if err := json.Unmarshal([]byte(currentConditionsEnc), &currentConditions); err != nil {
-			return err
-		}
-
-		out.Status.Conditions = make([]autoscaling.HorizontalPodAutoscalerCondition, len(currentConditions))
-		for i, currentCondition := range currentConditions {
-			if err := Convert_v1_HorizontalPodAutoscalerCondition_To_autoscaling_HorizontalPodAutoscalerCondition(&currentCondition, &out.Status.Conditions[i], s); err != nil {
-				return err
-			}
-		}
+		if err := json.Unmarshal([]byte(currentConditionsEnc), &currentConditions); err == nil {
+			out.Status.Conditions = make([]autoscaling.HorizontalPodAutoscalerCondition, len(currentConditions))
+			for i, currentCondition := range currentConditions {
+				if err := Convert_v1_HorizontalPodAutoscalerCondition_To_autoscaling_HorizontalPodAutoscalerCondition(&currentCondition, &out.Status.Conditions[i], s); err != nil {
+					return err
+				}
+			}
+		}
 		delete(out.Annotations, autoscaling.HorizontalPodAutoscalerConditionsAnnotation)
 	}
 
+	// drop round-tripping annotations after converting to internal
+	out.Annotations, _ = autoscaling.DropRoundTripHorizontalPodAutoscalerAnnotations(out.Annotations)
+
 	return nil
 }
2 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/BUILD generated vendored
@@ -39,7 +39,9 @@ go_test(
        "//staging/src/k8s.io/api/autoscaling/v2beta1:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
        "//vendor/github.com/stretchr/testify/assert:go_default_library",
        "//vendor/k8s.io/utils/pointer:go_default_library",
    ],
30 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1/conversion.go generated vendored
@@ -264,18 +264,22 @@ func Convert_autoscaling_HorizontalPodAutoscaler_To_v2beta1_HorizontalPodAutosca
 	if err := autoConvert_autoscaling_HorizontalPodAutoscaler_To_v2beta1_HorizontalPodAutoscaler(in, out, s); err != nil {
 		return err
 	}
-	if in.Spec.Behavior != nil {
-		old := out.Annotations
-		out.Annotations = make(map[string]string, len(old)+1)
-		for k, v := range old {
-			out.Annotations[k] = v
-		}
+
+	// clear any pre-existing round-trip annotations to make sure the only ones set are ones we produced during conversion
+	annotations, copiedAnnotations := autoscaling.DropRoundTripHorizontalPodAutoscalerAnnotations(out.Annotations)
+	out.Annotations = annotations
+
+	if in.Spec.Behavior != nil {
 		// TODO: this is marshaling an internal type. Fix this without breaking backwards compatibility with n-1 API servers.
 		behaviorEnc, err := json.Marshal(in.Spec.Behavior)
 		if err != nil {
 			return err
 		}
-		// Even if the annotation for behavior exists, we will just overwrite it
+		// copy before mutating
+		if !copiedAnnotations {
+			copiedAnnotations = true
+			out.Annotations = autoscaling.DeepCopyStringMap(out.Annotations)
+		}
 		out.Annotations[autoscaling.BehaviorSpecsAnnotation] = string(behaviorEnc)
 	}

@@ -288,13 +292,17 @@ func Convert_v2beta1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutosca
 	}

 	if behaviorEnc, hasBehaviors := out.Annotations[autoscaling.BehaviorSpecsAnnotation]; hasBehaviors {
 		// TODO: this is unmarshaling an internal type. Fix this without breaking backwards compatibility with n-1 API servers.
 		var behavior autoscaling.HorizontalPodAutoscalerBehavior
-		if err := json.Unmarshal([]byte(behaviorEnc), &behavior); err != nil {
-			return err
+		if err := json.Unmarshal([]byte(behaviorEnc), &behavior); err == nil && behavior != (autoscaling.HorizontalPodAutoscalerBehavior{}) {
+			// only move well-formed data from annotations to fields
+			out.Spec.Behavior = &behavior
 		}
-		out.Spec.Behavior = &behavior
 		delete(out.Annotations, autoscaling.BehaviorSpecsAnnotation)
 	}

+	// drop round-tripping annotations after converting to internal
+	out.Annotations, _ = autoscaling.DropRoundTripHorizontalPodAutoscalerAnnotations(out.Annotations)
+
 	return nil
 }
8 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta2/BUILD generated vendored
@@ -3,6 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
    name = "go_default_library",
    srcs = [
+        "conversion.go",
        "defaults.go",
        "doc.go",
        "register.go",
@@ -43,7 +44,14 @@ go_test(
    srcs = ["defaults_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//pkg/api/legacyscheme:go_default_library",
        "//pkg/apis/autoscaling:go_default_library",
        "//pkg/apis/autoscaling/install:go_default_library",
        "//pkg/apis/core/install:go_default_library",
        "//staging/src/k8s.io/api/autoscaling/v2beta2:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
        "//vendor/github.com/stretchr/testify/assert:go_default_library",
        "//vendor/k8s.io/utils/pointer:go_default_library",
    ],
42 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta2/conversion.go generated vendored Normal file
@@ -0,0 +1,42 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v2beta2

import (
	autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"

	"k8s.io/apimachinery/pkg/conversion"
	"k8s.io/kubernetes/pkg/apis/autoscaling"
)

func Convert_autoscaling_HorizontalPodAutoscaler_To_v2beta2_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *autoscalingv2beta2.HorizontalPodAutoscaler, s conversion.Scope) error {
	if err := autoConvert_autoscaling_HorizontalPodAutoscaler_To_v2beta2_HorizontalPodAutoscaler(in, out, s); err != nil {
		return err
	}
	// v2beta2 round-trips to internal without any serialized annotations, make sure any from other versions don't get serialized
	out.Annotations, _ = autoscaling.DropRoundTripHorizontalPodAutoscalerAnnotations(out.Annotations)
	return nil
}

func Convert_v2beta2_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *autoscalingv2beta2.HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error {
	if err := autoConvert_v2beta2_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in, out, s); err != nil {
		return err
	}
	// v2beta2 round-trips to internal without any serialized annotations, make sure any from other versions don't get serialized
	out.Annotations, _ = autoscaling.DropRoundTripHorizontalPodAutoscalerAnnotations(out.Annotations)
	return nil
}
54 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v2beta2/zz_generated.conversion.go generated vendored
@@ -90,16 +90,6 @@ func RegisterConversions(s *runtime.Scheme) error {
 	}); err != nil {
 		return err
 	}
-	if err := s.AddGeneratedConversionFunc((*v2beta2.HorizontalPodAutoscaler)(nil), (*autoscaling.HorizontalPodAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v2beta2_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(a.(*v2beta2.HorizontalPodAutoscaler), b.(*autoscaling.HorizontalPodAutoscaler), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*autoscaling.HorizontalPodAutoscaler)(nil), (*v2beta2.HorizontalPodAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_autoscaling_HorizontalPodAutoscaler_To_v2beta2_HorizontalPodAutoscaler(a.(*autoscaling.HorizontalPodAutoscaler), b.(*v2beta2.HorizontalPodAutoscaler), scope)
-	}); err != nil {
-		return err
-	}
 	if err := s.AddGeneratedConversionFunc((*v2beta2.HorizontalPodAutoscalerBehavior)(nil), (*autoscaling.HorizontalPodAutoscalerBehavior)(nil), func(a, b interface{}, scope conversion.Scope) error {
 		return Convert_v2beta2_HorizontalPodAutoscalerBehavior_To_autoscaling_HorizontalPodAutoscalerBehavior(a.(*v2beta2.HorizontalPodAutoscalerBehavior), b.(*autoscaling.HorizontalPodAutoscalerBehavior), scope)
 	}); err != nil {
@@ -260,6 +250,16 @@ func RegisterConversions(s *runtime.Scheme) error {
 	}); err != nil {
 		return err
 	}
+	if err := s.AddConversionFunc((*autoscaling.HorizontalPodAutoscaler)(nil), (*v2beta2.HorizontalPodAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_autoscaling_HorizontalPodAutoscaler_To_v2beta2_HorizontalPodAutoscaler(a.(*autoscaling.HorizontalPodAutoscaler), b.(*v2beta2.HorizontalPodAutoscaler), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*v2beta2.HorizontalPodAutoscaler)(nil), (*autoscaling.HorizontalPodAutoscaler)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v2beta2_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(a.(*v2beta2.HorizontalPodAutoscaler), b.(*autoscaling.HorizontalPodAutoscaler), scope)
+	}); err != nil {
+		return err
+	}
 	return nil
 }

@@ -406,11 +406,6 @@ func autoConvert_v2beta2_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAut
 	return nil
 }

-// Convert_v2beta2_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler is an autogenerated conversion function.
-func Convert_v2beta2_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *v2beta2.HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error {
-	return autoConvert_v2beta2_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in, out, s)
-}
-
 func autoConvert_autoscaling_HorizontalPodAutoscaler_To_v2beta2_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *v2beta2.HorizontalPodAutoscaler, s conversion.Scope) error {
 	out.ObjectMeta = in.ObjectMeta
 	if err := Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v2beta2_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil {
@@ -422,11 +417,6 @@ func autoConvert_autoscaling_HorizontalPodAutoscaler_To_v2beta2_HorizontalPodAut
 	return nil
 }

-// Convert_autoscaling_HorizontalPodAutoscaler_To_v2beta2_HorizontalPodAutoscaler is an autogenerated conversion function.
-func Convert_autoscaling_HorizontalPodAutoscaler_To_v2beta2_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *v2beta2.HorizontalPodAutoscaler, s conversion.Scope) error {
-	return autoConvert_autoscaling_HorizontalPodAutoscaler_To_v2beta2_HorizontalPodAutoscaler(in, out, s)
-}
-
 func autoConvert_v2beta2_HorizontalPodAutoscalerBehavior_To_autoscaling_HorizontalPodAutoscalerBehavior(in *v2beta2.HorizontalPodAutoscalerBehavior, out *autoscaling.HorizontalPodAutoscalerBehavior, s conversion.Scope) error {
 	out.ScaleUp = (*autoscaling.HPAScalingRules)(unsafe.Pointer(in.ScaleUp))
 	out.ScaleDown = (*autoscaling.HPAScalingRules)(unsafe.Pointer(in.ScaleDown))
@@ -479,7 +469,17 @@ func Convert_autoscaling_HorizontalPodAutoscalerCondition_To_v2beta2_HorizontalP

 func autoConvert_v2beta2_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *v2beta2.HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error {
 	out.ListMeta = in.ListMeta
-	out.Items = *(*[]autoscaling.HorizontalPodAutoscaler)(unsafe.Pointer(&in.Items))
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]autoscaling.HorizontalPodAutoscaler, len(*in))
+		for i := range *in {
+			if err := Convert_v2beta2_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.Items = nil
+	}
 	return nil
 }

@@ -490,7 +490,17 @@ func Convert_v2beta2_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAut

 func autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v2beta2_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *v2beta2.HorizontalPodAutoscalerList, s conversion.Scope) error {
 	out.ListMeta = in.ListMeta
-	out.Items = *(*[]v2beta2.HorizontalPodAutoscaler)(unsafe.Pointer(&in.Items))
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]v2beta2.HorizontalPodAutoscaler, len(*in))
+		for i := range *in {
+			if err := Convert_autoscaling_HorizontalPodAutoscaler_To_v2beta2_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.Items = nil
+	}
 	return nil
 }
8 vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/replica_calculator.go generated vendored
@@ -134,9 +134,15 @@ func (c *ReplicaCalculator) GetResourceReplicas(currentReplicas int32, targetUti
 		return currentReplicas, utilization, rawUtilization, timestamp, nil
 	}

+	newReplicas := int32(math.Ceil(newUsageRatio * float64(len(metrics))))
+	if (newUsageRatio < 1.0 && newReplicas > currentReplicas) || (newUsageRatio > 1.0 && newReplicas < currentReplicas) {
+		// return the current replicas if the change of metrics length would cause a change in scale direction
+		return currentReplicas, utilization, rawUtilization, timestamp, nil
+	}
+
 	// return the result, where the number of replicas considered is
 	// however many replicas factored into our calculation
-	return int32(math.Ceil(newUsageRatio * float64(len(metrics)))), utilization, rawUtilization, timestamp, nil
+	return newReplicas, utilization, rawUtilization, timestamp, nil
 }

 // GetRawResourceReplicas calculates the desired replica count based on a target resource utilization (as a raw milli-value)
2 vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_container.go generated vendored
@@ -415,7 +415,7 @@ func (ds *dockerService) ContainerStatus(_ context.Context, req *runtimeapi.Cont

 	labels, annotations := extractLabels(r.Config.Labels)
 	imageName := r.Config.Image
-	if len(ir.RepoTags) > 0 {
+	if ir != nil && len(ir.RepoTags) > 0 {
 		imageName = ir.RepoTags[0]
 	}
 	status := &runtimeapi.ContainerStatus{
8 vendor/k8s.io/kubernetes/pkg/kubelet/metrics/collectors/resource_metrics.go generated vendored
@@ -26,7 +26,7 @@ import (
 )

 var (
-	nodeCPUUsageDesc = metrics.NewDesc("node_cpu_usage_seconds",
+	nodeCPUUsageDesc = metrics.NewDesc("node_cpu_usage_seconds_total",
 		"Cumulative cpu time consumed by the node in core-seconds",
 		nil,
 		nil,
@@ -40,7 +40,7 @@ var (
 		metrics.ALPHA,
 		"")

-	containerCPUUsageDesc = metrics.NewDesc("container_cpu_usage_seconds",
+	containerCPUUsageDesc = metrics.NewDesc("container_cpu_usage_seconds_total",
 		"Cumulative cpu time consumed by the container in core-seconds",
 		[]string{"container", "pod", "namespace"},
 		nil,
@@ -120,7 +120,7 @@ func (rc *resourceMetricsCollector) collectNodeCPUMetrics(ch chan<- metrics.Metr
 	}

 	ch <- metrics.NewLazyMetricWithTimestamp(s.CPU.Time.Time,
-		metrics.NewLazyConstMetric(nodeCPUUsageDesc, metrics.GaugeValue, float64(*s.CPU.UsageCoreNanoSeconds)/float64(time.Second)))
+		metrics.NewLazyConstMetric(nodeCPUUsageDesc, metrics.CounterValue, float64(*s.CPU.UsageCoreNanoSeconds)/float64(time.Second)))
 }

 func (rc *resourceMetricsCollector) collectNodeMemoryMetrics(ch chan<- metrics.Metric, s summary.NodeStats) {
@@ -138,7 +138,7 @@ func (rc *resourceMetricsCollector) collectContainerCPUMetrics(ch chan<- metrics
 	}

 	ch <- metrics.NewLazyMetricWithTimestamp(s.CPU.Time.Time,
-		metrics.NewLazyConstMetric(containerCPUUsageDesc, metrics.GaugeValue,
+		metrics.NewLazyConstMetric(containerCPUUsageDesc, metrics.CounterValue,
 			float64(*s.CPU.UsageCoreNanoSeconds)/float64(time.Second), s.Name, pod.PodRef.Name, pod.PodRef.Namespace))
 }
7 vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/cache.go generated vendored
@@ -428,6 +428,13 @@ func (cache *schedulerCache) addPod(pod *v1.Pod) {

 // Assumes that lock is already acquired.
 func (cache *schedulerCache) updatePod(oldPod, newPod *v1.Pod) error {
+	if _, ok := cache.nodes[newPod.Spec.NodeName]; !ok {
+		// The node might have been deleted already.
+		// This is not a problem in the case where a pod update arrives before the
+		// node creation, because we will always have a create pod event before
+		// that, which will create the placeholder node item.
+		return nil
+	}
 	if err := cache.removePod(oldPod); err != nil {
 		return err
 	}
14 vendor/k8s.io/kubernetes/pkg/volume/azure_dd/azure_dd_max_disk_count.go generated vendored
@@ -89,6 +89,8 @@ var maxDataDiskCountMap = map[string]int64{
	"STANDARD_D3_V2": 16,
	"STANDARD_D3_V2_PROMO": 16,
	"STANDARD_D4": 32,
	"STANDARD_D48AS_V4": 32,
	"STANDARD_D48A_V4": 32,
	"STANDARD_D48S_V3": 32,
	"STANDARD_D48_V3": 32,
	"STANDARD_D4AS_V4": 8,
@@ -99,18 +101,21 @@ var maxDataDiskCountMap = map[string]int64{
	"STANDARD_D4_V3": 8,
	"STANDARD_D5_V2": 64,
	"STANDARD_D5_V2_PROMO": 64,
	"STANDARD_D64AS_V4": 32,
	"STANDARD_D64A_V4": 32,
	"STANDARD_D64S_V3": 32,
	"STANDARD_D64_V3": 32,
	"STANDARD_D8AS_V4": 16,
	"STANDARD_D8A_V4": 16,
	"STANDARD_D8S_V3": 16,
	"STANDARD_D8_V3": 16,
	"STANDARD_D96AS_V4": 32,
	"STANDARD_D96A_V4": 32,
	"STANDARD_DC1S_V2": 1,
	"STANDARD_DC2S": 2,
	"STANDARD_DC2S_V2": 2,
	"STANDARD_DC4S": 4,
	"STANDARD_DC4S_V2": 4,
	"STANDARD_DC8S": 8,
	"STANDARD_DC8_V2": 8,
	"STANDARD_DS11-1_V2": 8,
	"STANDARD_DS11": 8,
@@ -167,6 +172,7 @@ var maxDataDiskCountMap = map[string]int64{
	"STANDARD_E32_V3": 32,
	"STANDARD_E4-2S_V3": 8,
	"STANDARD_E48AS_V4": 32,
	"STANDARD_E48A_V4": 32,
	"STANDARD_E48S_V3": 32,
	"STANDARD_E48_V3": 32,
	"STANDARD_E4AS_V4": 8,
@@ -176,6 +182,7 @@ var maxDataDiskCountMap = map[string]int64{
	"STANDARD_E64-16S_V3": 32,
	"STANDARD_E64-32S_V3": 32,
	"STANDARD_E64AS_V4": 32,
	"STANDARD_E64A_V4": 32,
	"STANDARD_E64IS_V3": 32,
	"STANDARD_E64I_V3": 32,
	"STANDARD_E64S_V3": 32,
@@ -187,6 +194,7 @@ var maxDataDiskCountMap = map[string]int64{
	"STANDARD_E8S_V3": 16,
	"STANDARD_E8_V3": 16,
	"STANDARD_E96AS_V4": 32,
	"STANDARD_E96A_V4": 32,
	"STANDARD_F1": 4,
	"STANDARD_F16": 64,
	"STANDARD_F16S": 64,
@@ -301,13 +309,17 @@ var maxDataDiskCountMap = map[string]int64{
	"STANDARD_NV12_PROMO": 48,
	"STANDARD_NV12S_V2": 24,
	"STANDARD_NV12S_V3": 12,
	"STANDARD_NV16AS_V4": 32,
	"STANDARD_NV24": 64,
	"STANDARD_NV24_PROMO": 64,
	"STANDARD_NV24S_V2": 32,
	"STANDARD_NV24S_V3": 24,
	"STANDARD_NV32AS_V4": 32,
	"STANDARD_NV48S_V3": 32,
	"STANDARD_NV4AS_V4": 8,
	"STANDARD_NV6": 24,
	"STANDARD_NV6_PROMO": 24,
	"STANDARD_NV6S_V2": 12,
	"STANDARD_NV8AS_V4": 16,
	"STANDARD_PB6S": 12,
}
@@ -153,6 +153,9 @@ func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding)
 		ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "endpointslice-controller"},
 		Rules: []rbacv1.PolicyRule{
 			rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("services", "pods", "nodes").RuleOrDie(),
+			// The controller needs to be able to set a service's finalizers to be able to create an EndpointSlice
+			// resource that is owned by the service and sets blockOwnerDeletion=true in its ownerRef.
+			rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("services/finalizers").RuleOrDie(),
 			rbacv1helpers.NewRule("get", "list", "create", "update", "delete").Groups(discoveryGroup).Resources("endpointslices").RuleOrDie(),
 			eventsRule(),
 		},
4 vendor/k8s.io/legacy-cloud-providers/azure/azure_standard.go generated vendored
@@ -490,8 +490,8 @@ func (as *availabilitySet) GetZoneByNodeName(name string) (cloudprovider.Zone, e
 	}

 	zone := cloudprovider.Zone{
-		FailureDomain: failureDomain,
-		Region:        to.String(vm.Location),
+		FailureDomain: strings.ToLower(failureDomain),
+		Region:        strings.ToLower(to.String(vm.Location)),
 	}
 	return zone, nil
 }
10 vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss.go generated vendored
@@ -380,8 +380,8 @@ func (ss *scaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
 	}

 	return cloudprovider.Zone{
-		FailureDomain: failureDomain,
-		Region:        to.String(vm.Location),
+		FailureDomain: strings.ToLower(failureDomain),
+		Region:        strings.ToLower(to.String(vm.Location)),
 	}, nil
 }

@@ -1101,6 +1101,9 @@ func (ss *scaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac

 	// Update VMs with best effort that have already been added to nodeUpdates.
 	for meta, update := range nodeUpdates {
+		// create new instance of meta and update for passing to anonymous function
+		meta := meta
+		update := update
 		hostUpdates = append(hostUpdates, func() error {
 			ctx, cancel := getContextWithCancel()
 			defer cancel()
@@ -1401,6 +1404,9 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID,

 	// Update VMs with best effort that have already been added to nodeUpdates.
 	for meta, update := range nodeUpdates {
+		// create new instance of meta and update for passing to anonymous function
+		meta := meta
+		update := update
 		hostUpdates = append(hostUpdates, func() error {
 			ctx, cancel := getContextWithCancel()
 			defer cancel()
4 vendor/k8s.io/legacy-cloud-providers/azure/azure_zones.go generated vendored
@@ -78,8 +78,8 @@ func (az *Cloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) {
 		}

 		return cloudprovider.Zone{
-			FailureDomain: zone,
-			Region:        location,
+			FailureDomain: strings.ToLower(zone),
+			Region:        strings.ToLower(location),
 		}, nil
 	}
 	// if UseInstanceMetadata is false, get Zone name by calling ARM
13 vendor/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_internal.go generated vendored
@@ -45,6 +45,8 @@ const (
 	ILBFinalizerV1 = "gke.networking.io/l4-ilb-v1"
 	// ILBFinalizerV2 is the finalizer used by newer controllers that implement Internal LoadBalancer services.
 	ILBFinalizerV2 = "gke.networking.io/l4-ilb-v2"
+	// maxInstancesPerInstanceGroup defines maximum number of VMs per InstanceGroup.
+	maxInstancesPerInstanceGroup = 1000
 )

 func (g *Cloud) ensureInternalLoadBalancer(clusterName, clusterID string, svc *v1.Service, existingFwdRule *compute.ForwardingRule, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
@@ -512,6 +514,17 @@ func (g *Cloud) ensureInternalInstanceGroup(name, zone string, nodes []*v1.Node)
 		kubeNodes.Insert(n.Name)
 	}

+	// Individual InstanceGroup has a limit for 1000 instances in it.
+	// As a result, it's not possible to add more to it.
+	// Given that the long-term fix (AlphaFeatureILBSubsets) is already in-progress,
+	// to stop the bleeding we now simply cut down the contents to first 1000
+	// instances in the alphabetical order. Since there is a limitation for
+	// 250 backend VMs for ILB, this isn't making things worse.
+	if len(kubeNodes) > maxInstancesPerInstanceGroup {
+		klog.Warningf("Limiting number of VMs for InstanceGroup %s to %d", name, maxInstancesPerInstanceGroup)
+		kubeNodes = sets.NewString(kubeNodes.List()[:maxInstancesPerInstanceGroup]...)
+	}
+
 	gceNodes := sets.NewString()
 	if ig == nil {
 		klog.V(2).Infof("ensureInternalInstanceGroup(%v, %v): creating instance group", name, zone)
44 vendor/modules.txt vendored
@@ -642,8 +642,6 @@ github.com/mitchellh/mapstructure
 github.com/modern-go/concurrent
 # github.com/modern-go/reflect2 v1.0.1
 github.com/modern-go/reflect2
-# github.com/morikuni/aec v1.0.0
-github.com/morikuni/aec
 # github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618
 github.com/mrunalp/fileutils
 # github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
@@ -1134,7 +1132,7 @@ gopkg.in/square/go-jose.v2/jwt
 gopkg.in/warnings.v0
 # gopkg.in/yaml.v2 v2.2.8
 gopkg.in/yaml.v2
-# k8s.io/api v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.0-k3s.1
+# k8s.io/api v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.2-k3s.1
 k8s.io/api/admission/v1
 k8s.io/api/admission/v1beta1
 k8s.io/api/admissionregistration/v1
@@ -1178,7 +1176,7 @@ k8s.io/api/settings/v1alpha1
 k8s.io/api/storage/v1
 k8s.io/api/storage/v1alpha1
 k8s.io/api/storage/v1beta1
-# k8s.io/apiextensions-apiserver v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.0-k3s.1
+# k8s.io/apiextensions-apiserver v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.2-k3s.1
 k8s.io/apiextensions-apiserver/pkg/apihelpers
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install
@@ -1218,7 +1216,7 @@ k8s.io/apiextensions-apiserver/pkg/generated/openapi
 k8s.io/apiextensions-apiserver/pkg/registry/customresource
 k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor
 k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition
-# k8s.io/apimachinery v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.0-k3s.1
+# k8s.io/apimachinery v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.2-k3s.1
 k8s.io/apimachinery/pkg/api/equality
 k8s.io/apimachinery/pkg/api/errors
 k8s.io/apimachinery/pkg/api/meta
@@ -1280,7 +1278,7 @@ k8s.io/apimachinery/pkg/watch
 k8s.io/apimachinery/third_party/forked/golang/json
 k8s.io/apimachinery/third_party/forked/golang/netutil
 k8s.io/apimachinery/third_party/forked/golang/reflect
-# k8s.io/apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.0-k3s.1
+# k8s.io/apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.2-k3s.1
 k8s.io/apiserver/pkg/admission
 k8s.io/apiserver/pkg/admission/configuration
 k8s.io/apiserver/pkg/admission/initializer
@@ -1410,7 +1408,7 @@ k8s.io/apiserver/plugin/pkg/authenticator/request/basicauth
 k8s.io/apiserver/plugin/pkg/authenticator/token/oidc
 k8s.io/apiserver/plugin/pkg/authenticator/token/webhook
 k8s.io/apiserver/plugin/pkg/authorizer/webhook
-# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.0-k3s.1
+# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.2-k3s.1
 k8s.io/cli-runtime/pkg/genericclioptions
 k8s.io/cli-runtime/pkg/kustomize
 k8s.io/cli-runtime/pkg/kustomize/k8sdeps
@@ -1423,7 +1421,7 @@ k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch
 k8s.io/cli-runtime/pkg/kustomize/k8sdeps/validator
 k8s.io/cli-runtime/pkg/printers
 k8s.io/cli-runtime/pkg/resource
-# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.0-k3s.1
+# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.2-k3s.1
 k8s.io/client-go/discovery
 k8s.io/client-go/discovery/cached
 k8s.io/client-go/discovery/cached/disk
@@ -1657,7 +1655,7 @@ k8s.io/client-go/util/jsonpath
 k8s.io/client-go/util/keyutil
 k8s.io/client-go/util/retry
 k8s.io/client-go/util/workqueue
-# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.0-k3s.1
+# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.2-k3s.1
 k8s.io/cloud-provider
 k8s.io/cloud-provider/api
 k8s.io/cloud-provider/node/helpers
@@ -1665,13 +1663,13 @@ k8s.io/cloud-provider/service/helpers
 k8s.io/cloud-provider/volume
 k8s.io/cloud-provider/volume/errors
 k8s.io/cloud-provider/volume/helpers
-# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.0-k3s.1
+# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.2-k3s.1
 k8s.io/cluster-bootstrap/token/api
 k8s.io/cluster-bootstrap/token/jws
 k8s.io/cluster-bootstrap/token/util
 k8s.io/cluster-bootstrap/util/secrets
 k8s.io/cluster-bootstrap/util/tokens
-# k8s.io/code-generator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.0-k3s.1
+# k8s.io/code-generator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.2-k3s.1
 k8s.io/code-generator/cmd/client-gen/args
 k8s.io/code-generator/cmd/client-gen/generators
 k8s.io/code-generator/cmd/client-gen/generators/fake
@@ -1686,7 +1684,7 @@ k8s.io/code-generator/cmd/lister-gen/args
 k8s.io/code-generator/cmd/lister-gen/generators
 k8s.io/code-generator/pkg/namer
 k8s.io/code-generator/pkg/util
-# k8s.io/component-base v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.0-k3s.1
+# k8s.io/component-base v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.2-k3s.1
 k8s.io/component-base/cli/flag
 k8s.io/component-base/cli/globalflag
 k8s.io/component-base/codec
@@ -1704,10 +1702,10 @@ k8s.io/component-base/metrics/prometheus/workqueue
 k8s.io/component-base/metrics/testutil
 k8s.io/component-base/version
 k8s.io/component-base/version/verflag
-# k8s.io/cri-api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.0-k3s.1
+# k8s.io/cri-api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.2-k3s.1
 k8s.io/cri-api/pkg/apis
 k8s.io/cri-api/pkg/apis/runtime/v1alpha2
-# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.0-k3s.1
+# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.2-k3s.1
 k8s.io/csi-translation-lib
 k8s.io/csi-translation-lib/plugins
 # k8s.io/gengo v0.0.0-20200114144118-36b2048a9120
@@ -1722,7 +1720,7 @@ k8s.io/gengo/types
 k8s.io/heapster/metrics/api/v1/types
 # k8s.io/klog v1.0.0
 k8s.io/klog
-# k8s.io/kube-aggregator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.0-k3s.1
+# k8s.io/kube-aggregator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.2-k3s.1
 k8s.io/kube-aggregator/pkg/apis/apiregistration
 k8s.io/kube-aggregator/pkg/apis/apiregistration/install
 k8s.io/kube-aggregator/pkg/apis/apiregistration/v1
@@ -1750,7 +1748,7 @@ k8s.io/kube-aggregator/pkg/controllers/status
 k8s.io/kube-aggregator/pkg/registry/apiservice
 k8s.io/kube-aggregator/pkg/registry/apiservice/etcd
 k8s.io/kube-aggregator/pkg/registry/apiservice/rest
-# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.0-k3s.1
+# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.2-k3s.1
 k8s.io/kube-controller-manager/config/v1alpha1
 # k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c
 k8s.io/kube-openapi/pkg/aggregator
@@ -1761,14 +1759,14 @@ k8s.io/kube-openapi/pkg/schemaconv
 k8s.io/kube-openapi/pkg/util
 k8s.io/kube-openapi/pkg/util/proto
 k8s.io/kube-openapi/pkg/util/proto/validation
-# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.0-k3s.1
+# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.2-k3s.1
 k8s.io/kube-proxy/config/v1alpha1
-# k8s.io/kube-scheduler v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.0-k3s.1
+# k8s.io/kube-scheduler v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.2-k3s.1
 k8s.io/kube-scheduler/config/v1
 k8s.io/kube-scheduler/config/v1alpha1
 k8s.io/kube-scheduler/config/v1alpha2
 k8s.io/kube-scheduler/extender/v1
-# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.0-k3s.1
+# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.2-k3s.1
 k8s.io/kubectl/pkg/apps
 k8s.io/kubectl/pkg/cmd
 k8s.io/kubectl/pkg/cmd/annotate
@@ -1843,11 +1841,11 @@ k8s.io/kubectl/pkg/util/storage
 k8s.io/kubectl/pkg/util/templates
 k8s.io/kubectl/pkg/util/term
 k8s.io/kubectl/pkg/validation
-# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.0-k3s.1
+# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.2-k3s.1
 k8s.io/kubelet/config/v1beta1
 k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1
 k8s.io/kubelet/pkg/apis/pluginregistration/v1
-# k8s.io/kubernetes v1.18.0 => github.com/rancher/kubernetes v1.18.0-k3s.1
+# k8s.io/kubernetes v1.18.0 => github.com/rancher/kubernetes v1.18.2-k3s.1
 k8s.io/kubernetes/cmd/cloud-controller-manager/app
 k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config
 k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config/scheme
@@ -2590,7 +2588,7 @@ k8s.io/kubernetes/third_party/forked/gonum/graph
 k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear
 k8s.io/kubernetes/third_party/forked/gonum/graph/simple
 k8s.io/kubernetes/third_party/forked/gonum/graph/traverse
-# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.0-k3s.1
+# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.2-k3s.1
 k8s.io/legacy-cloud-providers/aws
 k8s.io/legacy-cloud-providers/azure
 k8s.io/legacy-cloud-providers/azure/auth
@@ -2621,7 +2619,7 @@ k8s.io/legacy-cloud-providers/openstack
 k8s.io/legacy-cloud-providers/vsphere
 k8s.io/legacy-cloud-providers/vsphere/vclib
 k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers
-# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.0-k3s.1
+# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.2-k3s.1
 k8s.io/metrics/pkg/apis/custom_metrics
 k8s.io/metrics/pkg/apis/custom_metrics/v1beta1
 k8s.io/metrics/pkg/apis/custom_metrics/v1beta2