Merge pull request #2137 from brandond/master-v1.18.8-k3s1
Update master to v1.18.8-k3s1
Commit: 7508ef48d2
@@ -6,7 +6,7 @@ ARG DAPPER_HOST_ARCH
ENV ARCH $DAPPER_HOST_ARCH
RUN if [ "${ARCH}" == "amd64" ] || [ "${ARCH}" == "arm64" ]; then \
    VERSION=0.18.3 OS=linux && \
    VERSION=0.18.4 OS=linux && \
    curl -sL "https://github.com/vmware-tanzu/sonobuoy/releases/download/v${VERSION}/sonobuoy_${VERSION}_${OS}_${ARCH}.tar.gz" | \
    tar -xzf - -C /usr/local/bin; \
    fi
Vagrantfile (vendored, 2 changes)
@@ -1,4 +1,4 @@
OS = (ENV['OS'] || "alpine310")
OS = (ENV['OS'] || "alpine312")
BOX_REPO = (ENV['BOX_REPO'] || "generic")
HOME = ENV['HOME']
PROJ_HOME = File.dirname(__FILE__)
go.mod (50 changes)
@@ -33,31 +33,31 @@ replace (
github.com/prometheus/client_model => github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910
github.com/prometheus/common => github.com/prometheus/common v0.0.0-20181126121408-4724e9255275
github.com/prometheus/procfs => github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a
k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.6-k3s1
k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.6-k3s1
k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.6-k3s1
k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.6-k3s1
k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.6-k3s1
k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.6-k3s1
k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.6-k3s1
k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.6-k3s1
k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.6-k3s1
k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.6-k3s1
k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.6-k3s1
k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.6-k3s1
k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.6-k3s1
k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.6-k3s1
k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.6-k3s1
k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.6-k3s1
k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.6-k3s1
k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.6-k3s1
k8s.io/kubernetes => github.com/rancher/kubernetes v1.18.6-k3s1
k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.6-k3s1
k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.6-k3s1
k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.18.6-k3s1
k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.6-k3s1
k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.18.6-k3s1
k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.18.6-k3s1
k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.8-k3s1
k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.8-k3s1
k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.8-k3s1
k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.8-k3s1
k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.8-k3s1
k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.8-k3s1
k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.8-k3s1
k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.8-k3s1
k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.8-k3s1
k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.8-k3s1
k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.8-k3s1
k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.8-k3s1
k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.8-k3s1
k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.8-k3s1
k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.8-k3s1
k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.8-k3s1
k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.8-k3s1
k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.8-k3s1
k8s.io/kubernetes => github.com/rancher/kubernetes v1.18.8-k3s1
k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.8-k3s1
k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.8-k3s1
k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.18.8-k3s1
k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.8-k3s1
k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.18.8-k3s1
k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.18.8-k3s1
mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34
)
go.sum (88 changes)
@@ -203,6 +203,7 @@ github.com/erikdubbelboer/gspt v0.0.0-20190125194910-e68493906b83 h1:ngHdSomn2My
github.com/erikdubbelboer/gspt v0.0.0-20190125194910-e68493906b83/go.mod h1:v6o7m/E9bfvm79dE1iFiF+3T7zLBnrjYjkWMa1J+Hv0=
github.com/euank/go-kmsg-parser v2.0.0+incompatible h1:cHD53+PLQuuQyLZeriD1V/esuG4MuU0Pjs5y6iknohY=
github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw=
github.com/evanphx/json-patch v0.0.0-20200808040245-162e5629780b/go.mod h1:NAJj0yf/KaRKURN6nyi7A9IZydMivZEm9oQLWNjfKDc=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M=
github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
@@ -435,6 +436,7 @@ github.com/insomniacslk/dhcp v0.0.0-20200420235442-ed3125c2efe7/go.mod h1:CfMdgu
github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
github.com/jamescun/tuntap v0.0.0-20190712092105-cb1fb277045c/go.mod h1:zzwpsgcYhzzIP5WyF8g9ivCv38cY9uAV9Gu0m3lThhE=
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jetstack/cert-manager v0.7.2/go.mod h1:nbddmhjWxYGt04bxvwVGUSeLhZ2PCyNvd7MpXdq+yWY=
github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a/go.mod h1:wK6yTYYcgjHE1Z1QtXACPDjcFJyBskHEdagmnq3vsP8=
github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
@@ -642,49 +644,49 @@ github.com/rancher/helm-controller v0.6.5 h1:gL6R3fbsBFBnrp2Wc36zn0zLQ8q2ckbLpfa
github.com/rancher/helm-controller v0.6.5/go.mod h1:ZylsxIMGNADRPRNW+NiBWhrwwks9vnKLQiCHYWb6Bi0=
github.com/rancher/kine v0.4.0 h1:1IhWy3TzjExG8xnj46eyUEWdzqNAD1WrgL4eEBKm6Uc=
github.com/rancher/kine v0.4.0/go.mod h1:IImtCJ68AIkE+VY/kUI0NkyJL5q5WzO8QvMsSXqbrpA=
github.com/rancher/kubernetes v1.18.6-k3s1 h1:UyO6rKHsFg48cG7V19Ji0XPXScNxPJRlPPYWBKMmo6Y=
github.com/rancher/kubernetes v1.18.6-k3s1/go.mod h1:Efg82S+Ti02A/Mww53bxroc7IgzX2bgPsf6hT8gAs3M=
github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.6-k3s1 h1:sIvPhLYuV3aZgYaQYASk2E9R+bB1XVcp/6d02mjgEH8=
github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.6-k3s1/go.mod h1:oMzWB6/RPBLYAObltLVSu5Ms1ZztBe7G8s1ni2rZY7w=
github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.6-k3s1 h1:W104+V2TFDIaV8IPsYQnnw8MnVtvmnl8jNUnAkkLpeI=
github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.6-k3s1/go.mod h1:BVIYewlEVCukQBRrZR3Kms8GdCsDQBsRIBCoy3rwzMk=
github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.6-k3s1 h1:rr7Ogkk2YC89UNr/ZBywG82tuu9PlB2iqqtPS4I04n4=
github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.6-k3s1/go.mod h1:0LbhSvBf6oDO/G0IsPYTC3eGykX9kRjGqE1+90am7Pg=
github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.6-k3s1 h1:Zd1pgZtAZS+lMfE9JRm7niTJx5W/WIxFwyA9ABGIhT8=
github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.6-k3s1/go.mod h1:wYoVKxMBc/Gtl3o5eEhoIy1iS0Zw8kLYIak9mud65gg=
github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.6-k3s1 h1:Uf6GBlxBaeJMevfwRWX8wbyJw5fIioxqIF9RMcfQSU4=
github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.6-k3s1/go.mod h1:e0a+/gPy7PnNaRJHZz5E3lqfMsiJ17sSfvktHyipb3I=
github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.6-k3s1 h1:qRfoeAUJQbBN9/tIf50sq5aHaG+j46201yIv86Dze0g=
github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.6-k3s1/go.mod h1:Ck7kQmlFASfY0SaqYH1NwUrxeuAipkIbnuHi642eQ+I=
github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.6-k3s1 h1:Pox7xyrB5KgaV0oZFwH6Jy0BWI+Wtj04P62awKbvXXE=
github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.6-k3s1/go.mod h1:jW0IWD1v1cNcp/vvXbVuovmZNSieKSZBdM7VmX1lrVI=
github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.6-k3s1 h1:BXGJkd7+sag5QHSlPBzYc8Q5uepBpXEb3cmLlgjQywQ=
github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.6-k3s1/go.mod h1:oHXhD/NqW/vlYggpTUWbP2x6disww69H1jdsyirbJl8=
github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.6-k3s1 h1:JR3PwOSBf055j96E3ADPK9/X5PrI7rHazsKb1AdZ/Gc=
github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.6-k3s1/go.mod h1:qBtAbyavqI3lGwEvxrQk9wwUTWntOADx38Iizyn31nw=
github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.6-k3s1 h1:ItVL3pMrVTnwPkIwEc3Y+5NYZWoz6nLwFrgLeNZGeG0=
github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.6-k3s1/go.mod h1:zRlCznOsLYdwq5DB2b/26X/n/04fhV3U3rMC60t80/Q=
github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.6-k3s1 h1:ot08wSXmCweEe5jTkaq4AAMlY+A2SvQRlqQi4JEJnQA=
github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.6-k3s1/go.mod h1:O3AtmT8iqccYwp/fsXdy3h0N9X/yfvRMD2XS48PJrBk=
github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.6-k3s1 h1:9ofjiqVUSRKQEMdessklWJRrgYPrBLDf9p4MMfKfZ5Q=
github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.6-k3s1/go.mod h1:/YQL/PqGdoNbC2H+w4tx2zrVdxNb541lW3PA81FdOlE=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.6-k3s1 h1:ug5WXTP2p0PVKYay4YrbLemYr0XNKqa1LxgHK8n5tGU=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.6-k3s1/go.mod h1:NcOKzNVVRhmkQmzCcBHfPPcZqgGXouc/o3Eul3saPj8=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.6-k3s1 h1:yQq7RCyBaO03uzYJlnM8wyhe20Nn03KGui68CDny4qI=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.6-k3s1/go.mod h1:pABoR/v0r2aJLFC1570FaaRJbXyiHhqdGHe5W8nk0XY=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.6-k3s1 h1:3+ND6LW6WIhVRjcpsfEupwZzsHjOUfsigomhQWhF/Ag=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.6-k3s1/go.mod h1:GLAmLACy/nOND24DRGKyPH21F89pTcevjPRxEtbLJmU=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.6-k3s1 h1:j9vNF6C374+ZbyRVArxvvw1JRHosBZe5Se8IinljIAI=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.6-k3s1/go.mod h1:UNQ/Ff/Mq9mmCl0MYGl3ciCEIRQr9BT+/DSsoy6/ZMI=
github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.6-k3s1 h1:3w7OKA5o91NYoUZ4dYHRmLBLGWCM6PPVzywD8bkswsA=
github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.6-k3s1/go.mod h1:eosbAJR16uuWsgirnmlt31NV+ZwZLQsMNbxiRZYbco8=
github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.6-k3s1 h1:9ZNoycDU5eIHFeF8YqcGLAlm9HeQDg8tXNvg46bFf8I=
github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.6-k3s1/go.mod h1:Raj75cxSm9NiVBoLk/lB1D4XvpBzTG4WoJ6nIH8Cyew=
github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.6-k3s1 h1:pGHcUlVbY6TBhu0QinG1e+vN33O4aGgB8nN5uujln1Q=
github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.6-k3s1/go.mod h1:R6lK1g14jiec20OVuA1ArvsCKs5th4rxGL3eUMdQmyA=
github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.6-k3s1 h1:eUs9gsRU4YZ3f9MPniiLsCLvVHj8/DtcViQH9kGh8Qg=
github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.6-k3s1/go.mod h1:xZM9EdJpWjqIWPvLiCP7vYKUEMwIgc0S8nc/MlLVK3Y=
github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.6-k3s1/go.mod h1:p8OmVbdzpawdZ/r9E1qcdJpzRirEg4OcSg8aZVWqvJo=
github.com/rancher/kubernetes v1.18.8-k3s1 h1:dqukpA38pR5V7opoTDDslVYIxlM3sP5535u3Zd23W2I=
github.com/rancher/kubernetes v1.18.8-k3s1/go.mod h1:SU7bBi8ZNHRjqzNhY4U78gClS1O7Q7avCrfF5aSiDko=
github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.8-k3s1 h1:GEbvokWECNmp/nZm40JeQ/DGa1riPeKFDqJZs/VPQMU=
github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.8-k3s1/go.mod h1:oMzWB6/RPBLYAObltLVSu5Ms1ZztBe7G8s1ni2rZY7w=
github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.8-k3s1 h1:lK7L5DnztJd9jcgfYwsXWRHq2EHPAgiD7hIX/9HUVJE=
github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.8-k3s1/go.mod h1:BVIYewlEVCukQBRrZR3Kms8GdCsDQBsRIBCoy3rwzMk=
github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.8-k3s1 h1:LNn6Vx4nNHDc+ckqBKsRbpbm+Eh0kWHvCvpC0np3JVM=
github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.8-k3s1/go.mod h1:EWaS4Y9oElqOAwD3kKDYBACo+zW/N3epe1THeEbG72k=
github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.8-k3s1 h1:FmUl8p1damR3F5GxMtXM4tteIr/a0Akx+48qlU7hOKA=
github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.8-k3s1/go.mod h1:iiHJKeJoHT/SawjIpPfHQ+5o47HW8mlzjYvADYbnHrk=
github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.8-k3s1 h1:gJ9/3Vaen+SdjVQsCScCY5/zcZ/sLOqlOhlTdPUoD8s=
github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.8-k3s1/go.mod h1:Las5K5JupAs7RlnSvh6AiOsz0P0t3zylPnqQ9RDxaGA=
github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.8-k3s1 h1:KTmdV7Egc777OeJqs6F3CurMSJlUE2HSr6nomO1G900=
github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.8-k3s1/go.mod h1:/LATWw92UfCIu8M1NjrVaOtVJ9buBJZS9Zvj0BtY5Ac=
github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.8-k3s1 h1:s/7BrWAaJX9ngv+q3LmkkaKpozIM3gOcWPEXbxFDqxc=
github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.8-k3s1/go.mod h1:jW0IWD1v1cNcp/vvXbVuovmZNSieKSZBdM7VmX1lrVI=
github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.8-k3s1 h1:ZEG20//RPRbrKX1EVpsZN8jASYKXcwVDXPk9+o0l27Q=
github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.8-k3s1/go.mod h1:oHXhD/NqW/vlYggpTUWbP2x6disww69H1jdsyirbJl8=
github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.8-k3s1 h1:DzakaPPGg9RHo81xa65tR0k6Ds8xmHpaH+OLG35y+Nk=
github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.8-k3s1/go.mod h1:qBtAbyavqI3lGwEvxrQk9wwUTWntOADx38Iizyn31nw=
github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.8-k3s1 h1:eiuJQhX0XaU/XVFIxMKbuKUXbt5c2vAl7dsaQeuB+Zg=
github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.8-k3s1/go.mod h1:zRlCznOsLYdwq5DB2b/26X/n/04fhV3U3rMC60t80/Q=
github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.8-k3s1 h1:G+2GcKHBfsnnyj5Fuqj6ks6DG6hEQyZrIloRxYHV1lw=
github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.8-k3s1/go.mod h1:O3AtmT8iqccYwp/fsXdy3h0N9X/yfvRMD2XS48PJrBk=
github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.8-k3s1 h1:x0Z1PPkojOpsrQbjIQoZQ9Tie7X5h/17YvluEtVks0Y=
github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.8-k3s1/go.mod h1:/YQL/PqGdoNbC2H+w4tx2zrVdxNb541lW3PA81FdOlE=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.8-k3s1 h1:COXcOjKFrmfayOoZT4OmzTlo0JdEcbOkm5YKLa4FTg0=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.8-k3s1/go.mod h1:NcOKzNVVRhmkQmzCcBHfPPcZqgGXouc/o3Eul3saPj8=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.8-k3s1 h1:NyD3nsuNkr6Gq/kyLJvUU941fwmtAwVSec14oSKm84g=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.8-k3s1/go.mod h1:pABoR/v0r2aJLFC1570FaaRJbXyiHhqdGHe5W8nk0XY=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.8-k3s1 h1:0LkqtqPCq8UssLzSNQo1u+r9tqXQZaXMT05RJ90SemA=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.8-k3s1/go.mod h1:GLAmLACy/nOND24DRGKyPH21F89pTcevjPRxEtbLJmU=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.8-k3s1 h1:tb76wY82Q/BwvXZNt+Vdxkm+AEa6UQw47btLa2OeIGo=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.8-k3s1/go.mod h1:UNQ/Ff/Mq9mmCl0MYGl3ciCEIRQr9BT+/DSsoy6/ZMI=
github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.8-k3s1 h1:ya9WTAduoFNKHsB/sMdXJPApMr58YSUyXRoJH0nhLOI=
github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.8-k3s1/go.mod h1:YK9Z0Z/3MCo+LC6HsodGE8zKhQp8Z9btmCMh+Yi673g=
github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.8-k3s1 h1:dhWex7SjpjQ5/iZEo+I3YjIOaQwUFudcE58Hkxgq0Z0=
github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.8-k3s1/go.mod h1:Raj75cxSm9NiVBoLk/lB1D4XvpBzTG4WoJ6nIH8Cyew=
github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.8-k3s1 h1:Sfhr2dUomwUq0b3p5/PjKBxGpz2+rz7ucp/GriEdVcA=
github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.8-k3s1/go.mod h1:R6lK1g14jiec20OVuA1ArvsCKs5th4rxGL3eUMdQmyA=
github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.8-k3s1 h1:CPxpH4yeNeta/9cOdX8QGiR6v3RoJz5M9EKmYNX3rB0=
github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.8-k3s1/go.mod h1:xZM9EdJpWjqIWPvLiCP7vYKUEMwIgc0S8nc/MlLVK3Y=
github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.8-k3s1/go.mod h1:p8OmVbdzpawdZ/r9E1qcdJpzRirEg4OcSg8aZVWqvJo=
github.com/rancher/moq v0.0.0-20190404221404-ee5226d43009/go.mod h1:wpITyDPTi/Na5h73XkbuEf2AP9fbgrIGqqxVzFhYD6U=
github.com/rancher/nocode v0.0.0-20200630202308-cb097102c09f/go.mod h1:iAAt6Amgbysi6srDJs9SxGSbG2j/JSRb/xCrnEtA69g=
github.com/rancher/remotedialer v0.2.0 h1:xD7t3K6JYwTdAsxmGtTHQMkEkFgKouQ1foLxVW424Dc=
vendor/k8s.io/apimachinery/pkg/util/sets/BUILD (generated, vendored, 2 changes)
@@ -38,7 +38,7 @@ go_genrule(
],
cmd = """
$(location //vendor/k8s.io/code-generator/cmd/set-gen) \
--input-dirs k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/sets/types \
--input-dirs k8s.io/apimachinery/pkg/util/sets/types \
--output-base $$(dirname $$(dirname $(location :byte.go))) \
--go-header-file $(location //hack/boilerplate:boilerplate.generatego.txt) \
--output-package sets
vendor/k8s.io/apiserver/pkg/server/healthz/BUILD (generated, vendored, 2 changes)
@@ -11,6 +11,7 @@ go_test(
srcs = ["healthz_test.go"],
embed = [":go_default_library"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/endpoints/metrics:go_default_library",
"//staging/src/k8s.io/component-base/metrics/legacyregistry:go_default_library",
@@ -31,7 +32,6 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/endpoints/metrics:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server/httplog:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go (generated, vendored, 18 changes)
@@ -20,6 +20,7 @@ import (
"bytes"
"fmt"
"net/http"
"reflect"
"strings"
"sync"
"sync/atomic"
@@ -29,7 +30,6 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/endpoints/metrics"
"k8s.io/apiserver/pkg/server/httplog"
"k8s.io/client-go/informers"
"k8s.io/klog"
)
@@ -82,16 +82,20 @@ func (l *log) Check(_ *http.Request) error {
return fmt.Errorf("logging blocked")
}
type cacheSyncWaiter interface {
WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
}
type informerSync struct {
sharedInformerFactory informers.SharedInformerFactory
cacheSyncWaiter cacheSyncWaiter
}
var _ HealthChecker = &informerSync{}
// NewInformerSyncHealthz returns a new HealthChecker that will pass only if all informers in the given sharedInformerFactory sync.
func NewInformerSyncHealthz(sharedInformerFactory informers.SharedInformerFactory) HealthChecker {
// NewInformerSyncHealthz returns a new HealthChecker that will pass only if all informers in the given cacheSyncWaiter sync.
func NewInformerSyncHealthz(cacheSyncWaiter cacheSyncWaiter) HealthChecker {
return &informerSync{
sharedInformerFactory: sharedInformerFactory,
cacheSyncWaiter: cacheSyncWaiter,
}
}
@@ -104,8 +108,8 @@ func (i *informerSync) Check(_ *http.Request) error {
// Close stopCh to force checking if informers are synced now.
close(stopCh)
var informersByStarted map[bool][]string
for informerType, started := range i.sharedInformerFactory.WaitForCacheSync(stopCh) {
informersByStarted := make(map[bool][]string)
for informerType, started := range i.cacheSyncWaiter.WaitForCacheSync(stopCh) {
informersByStarted[started] = append(informersByStarted[started], informerType.String())
}
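The healthz change above decouples the informer-sync health check from a concrete SharedInformerFactory: anything that implements WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool now satisfies the check. A minimal standalone sketch of the same pattern, with a hypothetical fakeWaiter standing in for a real informer factory:

```go
package main

import (
	"fmt"
	"reflect"
)

// cacheSyncWaiter mirrors the interface introduced in the diff above.
type cacheSyncWaiter interface {
	WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
}

// fakeWaiter is a hypothetical stand-in for informers.SharedInformerFactory.
type fakeWaiter struct{ synced bool }

func (f fakeWaiter) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
	return map[reflect.Type]bool{reflect.TypeOf(f): f.synced}
}

// check mirrors informerSync.Check: every informer type must report synced.
func check(w cacheSyncWaiter) error {
	stopCh := make(chan struct{})
	close(stopCh) // force an immediate answer instead of blocking
	informersByStarted := make(map[bool][]string)
	for informerType, started := range w.WaitForCacheSync(stopCh) {
		informersByStarted[started] = append(informersByStarted[started], informerType.String())
	}
	if notStarted := informersByStarted[false]; len(notStarted) > 0 {
		return fmt.Errorf("%d informers not started yet: %v", len(notStarted), notStarted)
	}
	return nil
}

func main() {
	fmt.Println(check(fakeWaiter{synced: false})) // reports the unsynced informer
	fmt.Println(check(fakeWaiter{synced: true}))  // <nil>
}
```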
vendor/k8s.io/client-go/pkg/version/base.go (generated, vendored, 6 changes)
@@ -3,8 +3,8 @@ package version
var (
gitMajor = "1"
gitMinor = "18"
gitVersion = "v1.18.6-k3s1"
gitCommit = "60da51f880b8e78467c08510bde06c1b3b2dedbe"
gitVersion = "v1.18.8-k3s1"
gitCommit = "b86d0e4a07fd882c2f9718f4e82b06dfd4b55195"
gitTreeState = "clean"
buildDate = "2020-07-15T21:57:06Z"
buildDate = "2020-08-13T18:53:34Z"
)
vendor/k8s.io/cloud-provider/go.sum (generated, vendored, 7 changes)
@@ -20,8 +20,8 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v0.0.0-20200808040245-162e5629780b h1:vCplRbYcTTeBVLjIU0KvipEeVBSxl6sakUBRmeLBTkw=
github.com/evanphx/json-patch v0.0.0-20200808040245-162e5629780b/go.mod h1:NAJj0yf/KaRKURN6nyi7A9IZydMivZEm9oQLWNjfKDc=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@@ -65,6 +65,7 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@@ -93,6 +94,8 @@ github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGV
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
vendor/k8s.io/component-base/version/base.go (generated, vendored, 6 changes)
@@ -3,8 +3,8 @@ package version
var (
gitMajor = "1"
gitMinor = "18"
gitVersion = "v1.18.6-k3s1"
gitCommit = "60da51f880b8e78467c08510bde06c1b3b2dedbe"
gitVersion = "v1.18.8-k3s1"
gitCommit = "b86d0e4a07fd882c2f9718f4e82b06dfd4b55195"
gitTreeState = "clean"
buildDate = "2020-07-15T21:57:06Z"
buildDate = "2020-08-13T18:53:34Z"
)
vendor/k8s.io/csi-translation-lib/go.sum (generated, vendored, 4 changes)
@@ -20,7 +20,7 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v0.0.0-20200808040245-162e5629780b/go.mod h1:NAJj0yf/KaRKURN6nyi7A9IZydMivZEm9oQLWNjfKDc=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
@@ -58,6 +58,7 @@ github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@@ -84,6 +85,7 @@ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
vendor/k8s.io/kubernetes/pkg/kubelet/OWNERS (generated, vendored, 1 change)
@@ -7,6 +7,7 @@ approvers:
- tallclair
- vishh
- yujuhong
- dashpole
reviewers:
- sig-node-reviewers
labels:
vendor/k8s.io/kubernetes/pkg/kubelet/apis/podresources/OWNERS (generated, vendored, 4 changes)
@@ -1,4 +0,0 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- dashpole
vendor/k8s.io/kubernetes/pkg/kubelet/cadvisor/OWNERS (generated, vendored, 1 change)
@@ -1,5 +1,4 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- dashpole
- sjenning
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/policy_static.go (generated, vendored, 22 changes)
@@ -338,15 +338,16 @@ func (p *staticPolicy) GetTopologyHints(s state.State, pod *v1.Pod, container *v
}
klog.Infof("[cpumanager] Regenerating TopologyHints for CPUs already allocated to (pod %v, container %v)", string(pod.UID), container.Name)
return map[string][]topologymanager.TopologyHint{
string(v1.ResourceCPU): p.generateCPUTopologyHints(allocated, requested),
string(v1.ResourceCPU): p.generateCPUTopologyHints(allocated, cpuset.CPUSet{}, requested),
}
}
// Get a list of available CPUs.
available := p.assignableCPUs(s)
reusable := p.cpusToReuse[string(pod.UID)]
// Generate hints.
cpuHints := p.generateCPUTopologyHints(available, requested)
cpuHints := p.generateCPUTopologyHints(available, reusable, requested)
klog.Infof("[cpumanager] TopologyHints generated for pod '%v', container '%v': %v", pod.Name, container.Name, cpuHints)
return map[string][]topologymanager.TopologyHint{
@@ -360,7 +361,7 @@ func (p *staticPolicy) GetTopologyHints(s state.State, pod *v1.Pod, container *v
// It follows the convention of marking all hints that have the same number of
// bits set as the narrowest matching NUMANodeAffinity with 'Preferred: true', and
// marking all others with 'Preferred: false'.
func (p *staticPolicy) generateCPUTopologyHints(availableCPUs cpuset.CPUSet, request int) []topologymanager.TopologyHint {
func (p *staticPolicy) generateCPUTopologyHints(availableCPUs cpuset.CPUSet, reusableCPUs cpuset.CPUSet, request int) []topologymanager.TopologyHint {
// Initialize minAffinitySize to include all NUMA Nodes.
minAffinitySize := p.topology.CPUDetails.NUMANodes().Size()
// Initialize minSocketsOnMinAffinity to include all Sockets.
@@ -380,16 +381,25 @@ func (p *staticPolicy) generateCPUTopologyHints(availableCPUs cpuset.CPUSet, req
}
}
// Then check to see if we have enough CPUs available on the current
// socket bitmask to satisfy the CPU request.
// Then check to see if all of the reusable CPUs are part of the bitmask.
numMatching := 0
for _, c := range reusableCPUs.ToSlice() {
// Disregard this mask if its NUMANode isn't part of it.
if !mask.IsSet(p.topology.CPUDetails[c].NUMANodeID) {
return
}
numMatching++
}
// Finally, check to see if enough available CPUs remain on the current
// NUMA node combination to satisfy the CPU request.
for _, c := range availableCPUs.ToSlice() {
if mask.IsSet(p.topology.CPUDetails[c].NUMANodeID) {
numMatching++
}
}
// If we don't, then move onto the next combination.
// If they don't, then move onto the next combination.
if numMatching < request {
return
}
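The cpumanager change above threads a reusableCPUs set through hint generation: a candidate NUMA mask is rejected unless it covers every reusable CPU, and the reusable plus available CPUs inside the mask must still satisfy the request. A small standalone sketch of that counting rule, using plain ints for CPU IDs and a map for NUMA placement (both hypothetical simplifications of the real cpuset and topology types):

```go
package main

import "fmt"

// maskSatisfies mirrors the counting logic from generateCPUTopologyHints:
// every reusable CPU must sit on a NUMA node inside the mask, and the number
// of reusable plus available CPUs inside the mask must cover the request.
func maskSatisfies(mask map[int]bool, numaOf map[int]int, reusable, available []int, request int) bool {
	numMatching := 0
	for _, c := range reusable {
		if !mask[numaOf[c]] {
			return false // a reusable CPU falls outside the mask: disregard this mask
		}
		numMatching++
	}
	for _, c := range available {
		if mask[numaOf[c]] {
			numMatching++
		}
	}
	return numMatching >= request
}

func main() {
	numaOf := map[int]int{0: 0, 1: 0, 2: 1, 3: 1} // CPU ID -> NUMA node
	mask := map[int]bool{0: true}                 // candidate mask covering NUMA node 0 only
	fmt.Println(maskSatisfies(mask, numaOf, []int{0}, []int{1}, 2)) // true
	fmt.Println(maskSatisfies(mask, numaOf, []int{2}, []int{1}, 2)) // false: reusable CPU 2 is on node 1
}
```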
vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/topology_hints.go (generated, vendored, 58 changes)
@@ -20,6 +20,7 @@ import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog"
pluginapi "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask"
)
@@ -57,21 +58,22 @@ func (m *ManagerImpl) GetTopologyHints(pod *v1.Pod, container *v1.Container) map
continue
}
klog.Infof("[devicemanager] Regenerating TopologyHints for resource '%v' already allocated to (pod %v, container %v)", resource, string(pod.UID), container.Name)
deviceHints[resource] = m.generateDeviceTopologyHints(resource, allocated, requested)
deviceHints[resource] = m.generateDeviceTopologyHints(resource, allocated, sets.String{}, requested)
continue
}
// Get the list of available devices, for which TopologyHints should be generated.
available := m.getAvailableDevices(resource)
if available.Len() < requested {
klog.Errorf("[devicemanager] Unable to generate topology hints: requested number of devices unavailable for '%s': requested: %d, available: %d", resource, requested, available.Len())
reusable := m.devicesToReuse[string(pod.UID)][resource]
if available.Union(reusable).Len() < requested {
klog.Errorf("[devicemanager] Unable to generate topology hints: requested number of devices unavailable for '%s': requested: %d, available: %d", resource, requested, available.Union(reusable).Len())
deviceHints[resource] = []topologymanager.TopologyHint{}
continue
}
// Generate TopologyHints for this resource given the current
// request size and the list of available devices.
deviceHints[resource] = m.generateDeviceTopologyHints(resource, available, requested)
deviceHints[resource] = m.generateDeviceTopologyHints(resource, available, reusable, requested)
}
}
@@ -93,7 +95,7 @@ func (m *ManagerImpl) getAvailableDevices(resource string) sets.String {
return m.healthyDevices[resource].Difference(m.allocatedDevices[resource])
}
func (m *ManagerImpl) generateDeviceTopologyHints(resource string, devices sets.String, request int) []topologymanager.TopologyHint {
func (m *ManagerImpl) generateDeviceTopologyHints(resource string, available sets.String, reusable sets.String, request int) []topologymanager.TopologyHint {
// Initialize minAffinitySize to include all NUMA Nodes
minAffinitySize := len(m.numaNodes)
@@ -103,36 +105,37 @@ func (m *ManagerImpl) generateDeviceTopologyHints(resource string, devices sets.
// First, update minAffinitySize for the current request size.
devicesInMask := 0
for _, device := range m.allDevices[resource] {
if device.Topology == nil {
continue
}
for _, node := range device.Topology.Nodes {
if mask.IsSet(int(node.ID)) {
devicesInMask++
break
}
if mask.AnySet(m.getNUMANodeIds(device.Topology)) {
devicesInMask++
}
}
if devicesInMask >= request && mask.Count() < minAffinitySize {
minAffinitySize = mask.Count()
}
// Then check to see if we have enough devices available on the current
// NUMA Node combination to satisfy the device request.
// Then check to see if all of the reusable devices are part of the bitmask.
numMatching := 0
for d := range devices {
for d := range reusable {
// Skip the device if it doesn't specify any topology info.
if m.allDevices[resource][d].Topology == nil {
continue
}
for _, node := range m.allDevices[resource][d].Topology.Nodes {
if mask.IsSet(int(node.ID)) {
numMatching++
break
}
// Otherwise disregard this mask if its NUMANode isn't part of it.
if !mask.AnySet(m.getNUMANodeIds(m.allDevices[resource][d].Topology)) {
return
}
numMatching++
}
// Finally, check to see if enough available devices remain on the
// current NUMA node combination to satisfy the device request.
for d := range available {
if mask.AnySet(m.getNUMANodeIds(m.allDevices[resource][d].Topology)) {
numMatching++
}
}
// If we don't, then move onto the next combination.
// If they don't, then move onto the next combination.
if numMatching < request {
return
}
@@ -158,3 +161,14 @@ func (m *ManagerImpl) generateDeviceTopologyHints(resource string, devices sets.
return hints
}
func (m *ManagerImpl) getNUMANodeIds(topology *pluginapi.TopologyInfo) []int {
	if topology == nil {
		return nil
	}
	var ids []int
	for _, n := range topology.Nodes {
		ids = append(ids, int(n.ID))
	}
	return ids
}
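The device-manager variant leans on the new getNUMANodeIds helper together with the AnySet bitmask method (added in the bitmask diff below): a device counts toward a candidate mask if any of its NUMA nodes is set in that mask. A compact standalone sketch of that pair, with a plain uint64 standing in for the real bitmask type:

```go
package main

import "fmt"

// numaNodeIDs mirrors getNUMANodeIds: collect the NUMA node IDs a device reports.
func numaNodeIDs(nodes []int64) []int {
	var ids []int
	for _, n := range nodes {
		ids = append(ids, int(n))
	}
	return ids
}

// anySet mirrors bitMask.AnySet on a plain uint64: true if any listed bit is set.
func anySet(mask uint64, bits []int) bool {
	for _, b := range bits {
		if mask&(1<<uint(b)) != 0 {
			return true
		}
	}
	return false
}

func main() {
	mask := uint64(1 << 0) // candidate mask covering NUMA node 0 only
	fmt.Println(anySet(mask, numaNodeIDs([]int64{0, 1}))) // true: device spans nodes 0 and 1
	fmt.Println(anySet(mask, numaNodeIDs([]int64{1})))    // false: device sits on node 1 only
}
```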
vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask/bitmask.go (generated, vendored, 11 changes)
@@ -33,6 +33,7 @@ type BitMask interface {
IsEqual(mask BitMask) bool
IsEmpty() bool
IsSet(bit int) bool
AnySet(bits []int) bool
IsNarrowerThan(mask BitMask) bool
String() string
Count() int
@@ -120,6 +121,16 @@ func (s *bitMask) IsSet(bit int) bool {
return (*s & (1 << uint64(bit))) > 0
}
// AnySet checks bit in mask to see if any provided bit is set to one
func (s *bitMask) AnySet(bits []int) bool {
	for _, b := range bits {
		if s.IsSet(b) {
			return true
		}
	}
	return false
}
// IsEqual checks if masks are equal
func (s *bitMask) IsEqual(mask BitMask) bool {
return *s == *mask.(*bitMask)
vendor/k8s.io/kubernetes/pkg/kubelet/eviction/OWNERS (generated, vendored, 4 changes)
@@ -1,8 +1,4 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- derekwaynecarr
- vishh
- dchen1107
- dashpole
- sjenning
vendor/k8s.io/kubernetes/pkg/kubelet/images/OWNERS (generated, vendored, 1 change)
@@ -1,5 +1,4 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- dashpole
- sjenning
vendor/k8s.io/kubernetes/pkg/kubelet/metrics/OWNERS (generated, vendored, 1 change)
@@ -1,5 +1,4 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- dashpole
- sjenning
vendor/k8s.io/kubernetes/pkg/kubelet/oom/OWNERS (generated, vendored, 4 changes)
@@ -1,4 +0,0 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- dashpole
vendor/k8s.io/kubernetes/pkg/kubelet/preemption/OWNERS (generated, vendored, 1 change)
@@ -1,5 +1,4 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- dashpole
- sjenning
vendor/k8s.io/kubernetes/pkg/kubelet/stats/OWNERS (generated, vendored, 4 changes)
@@ -1,4 +0,0 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- dashpole
@@ -8,11 +8,14 @@ go_library(
deps = [
"//pkg/scheduler/framework/plugins/helper:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/listers:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/util/node:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/client-go/listers/apps/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
@@ -25,15 +25,22 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
appslisters "k8s.io/client-go/listers/apps/v1"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
sharedlisters "k8s.io/kubernetes/pkg/scheduler/listers"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
utilnode "k8s.io/kubernetes/pkg/util/node"
)
// DefaultPodTopologySpread is a plugin that calculates selector spread priority.
type DefaultPodTopologySpread struct {
handle framework.FrameworkHandle
sharedLister sharedlisters.SharedLister
services corelisters.ServiceLister
replicationControllers corelisters.ReplicationControllerLister
replicaSets appslisters.ReplicaSetLister
statefulSets appslisters.StatefulSetLister
}
var _ framework.ScorePlugin = &DefaultPodTopologySpread{}
@@ -90,7 +97,7 @@ func (pl *DefaultPodTopologySpread) Score(ctx context.Context, state *framework.
return 0, framework.NewStatus(framework.Error, fmt.Sprintf("%+v convert to tainttoleration.preScoreState error", c))
}
nodeInfo, err := pl.handle.SnapshotSharedLister().NodeInfos().Get(nodeName)
nodeInfo, err := pl.sharedLister.NodeInfos().Get(nodeName)
if err != nil {
return 0, framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v", nodeName, err))
}
@@ -117,7 +124,7 @@ func (pl *DefaultPodTopologySpread) NormalizeScore(ctx context.Context, state *f
if scores[i].Score > maxCountByNodeName {
maxCountByNodeName = scores[i].Score
}
nodeInfo, err := pl.handle.SnapshotSharedLister().NodeInfos().Get(scores[i].Name)
nodeInfo, err := pl.sharedLister.NodeInfos().Get(scores[i].Name)
if err != nil {
return framework.NewStatus(framework.Error, err.Error())
}
@@ -148,7 +155,7 @@ func (pl *DefaultPodTopologySpread) NormalizeScore(ctx context.Context, state *f
}
// If there is zone information present, incorporate it
if haveZones {
nodeInfo, err := pl.handle.SnapshotSharedLister().NodeInfos().Get(scores[i].Name)
nodeInfo, err := pl.sharedLister.NodeInfos().Get(scores[i].Name)
if err != nil {
return framework.NewStatus(framework.Error, err.Error())
}
@@ -180,13 +187,12 @@ func (pl *DefaultPodTopologySpread) ScoreExtensions() framework.ScoreExtensions
// PreScore builds and writes cycle state used by Score and NormalizeScore.
func (pl *DefaultPodTopologySpread) PreScore(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodes []*v1.Node) *framework.Status {
var selector labels.Selector
informerFactory := pl.handle.SharedInformerFactory()
selector = helper.DefaultSelector(
pod,
informerFactory.Core().V1().Services().Lister(),
informerFactory.Core().V1().ReplicationControllers().Lister(),
informerFactory.Apps().V1().ReplicaSets().Lister(),
informerFactory.Apps().V1().StatefulSets().Lister(),
pl.services,
pl.replicationControllers,
pl.replicaSets,
pl.statefulSets,
)
state := &preScoreState{
selector: selector,
@@ -197,8 +203,20 @@ func (pl *DefaultPodTopologySpread) PreScore(ctx context.Context, cycleState *fr
// New initializes a new plugin and returns it.
func New(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin, error) {
	sharedLister := handle.SnapshotSharedLister()
	if sharedLister == nil {
		return nil, fmt.Errorf("SnapshotSharedLister is nil")
	}
	sharedInformerFactory := handle.SharedInformerFactory()
	if sharedInformerFactory == nil {
		return nil, fmt.Errorf("SharedInformerFactory is nil")
	}
	return &DefaultPodTopologySpread{
		handle:                 handle,
		sharedLister:           sharedLister,
		services:               sharedInformerFactory.Core().V1().Services().Lister(),
		replicationControllers: sharedInformerFactory.Core().V1().ReplicationControllers().Lister(),
		replicaSets:            sharedInformerFactory.Apps().V1().ReplicaSets().Lister(),
		statefulSets:           sharedInformerFactory.Apps().V1().StatefulSets().Lister(),
	}, nil
}
vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/cache.go (generated, vendored, 1 change)
@@ -281,6 +281,7 @@ func (cache *schedulerCache) updateNodeInfoSnapshotList(snapshot *Snapshot, upda
if updateAll {
// Take a snapshot of the nodes order in the tree
snapshot.nodeInfoList = make([]*schedulernodeinfo.NodeInfo, 0, cache.nodeTree.numNodes)
cache.nodeTree.resetExhausted()
for i := 0; i < cache.nodeTree.numNodes; i++ {
nodeName := cache.nodeTree.next()
if n := snapshot.nodeInfoMap[nodeName]; n != nil {
vendor/k8s.io/kubernetes/pkg/volume/azure_dd/attacher.go (generated, vendored, 2 changes)
@@ -163,7 +163,7 @@ func (a *azureDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string,
newDevicePath := ""
err = wait.Poll(1*time.Second, timeout, func() (bool, error) {
err = wait.PollImmediate(1*time.Second, timeout, func() (bool, error) {
if newDevicePath, err = findDiskByLun(int(lun), io, exec); err != nil {
return false, fmt.Errorf("azureDisk - WaitForAttach ticker failed node (%s) disk (%s) lun(%v) err(%s)", nodeName, diskName, lun, err)
}
vendor/k8s.io/kubernetes/pkg/volume/azure_file/azure_file.go (generated, vendored, 2 changes)
@@ -294,7 +294,7 @@ func (b *azureFileMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) e
}
mountComplete := false
err = wait.Poll(5*time.Second, 10*time.Minute, func() (bool, error) {
err = wait.PollImmediate(1*time.Second, 2*time.Minute, func() (bool, error) {
err := b.mounter.MountSensitive(source, dir, "cifs", mountOptions, sensitiveMountOptions)
mountComplete = true
return true, err
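Both Azure volume fixes above swap wait.Poll for wait.PollImmediate from k8s.io/apimachinery/pkg/util/wait. PollImmediate runs the condition once before sleeping for the first interval, so an already-attached disk or an immediately successful mount is detected without waiting out the initial tick. A small illustration; the condition function is a made-up example, not code from the PR:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	attempts := 0
	condition := func() (bool, error) {
		attempts++
		return true, nil // pretend the device shows up on the first probe
	}

	start := time.Now()
	// PollImmediate invokes condition before the first 1s tick, so this returns
	// almost instantly; plain wait.Poll would sleep ~1s before the first probe.
	if err := wait.PollImmediate(1*time.Second, 10*time.Second, condition); err != nil {
		fmt.Println("poll failed:", err)
	}
	fmt.Printf("succeeded after %d attempt(s) in %s\n", attempts, time.Since(start).Round(time.Millisecond))
}
```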
140
vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss.go
generated
vendored
140
vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss.go
generated
vendored
@ -61,6 +61,13 @@ type vmssMetaInfo struct {
|
||||
resourceGroup string
|
||||
}
|
||||
|
||||
// nodeIdentity identifies a node within a subscription.
|
||||
type nodeIdentity struct {
|
||||
resourceGroup string
|
||||
vmssName string
|
||||
nodeName string
|
||||
}
|
||||
|
||||
// scaleSet implements VMSet interface for Azure scale set.
|
||||
type scaleSet struct {
|
||||
*Cloud
|
||||
@ -70,7 +77,7 @@ type scaleSet struct {
|
||||
availabilitySet VMSet
|
||||
|
||||
vmssCache *azcache.TimedCache
|
||||
vmssVMCache *azcache.TimedCache
|
||||
vmssVMCache *sync.Map // [resourcegroup/vmssname]*azcache.TimedCache
|
||||
availabilitySetNodesCache *azcache.TimedCache
|
||||
}
|
||||
|
||||
@ -80,6 +87,7 @@ func newScaleSet(az *Cloud) (VMSet, error) {
|
||||
ss := &scaleSet{
|
||||
Cloud: az,
|
||||
availabilitySet: newAvailabilitySet(az),
|
||||
vmssVMCache: &sync.Map{},
|
||||
}
|
||||
|
||||
if !ss.DisableAvailabilitySetNodes {
|
||||
@ -94,11 +102,6 @@ func newScaleSet(az *Cloud) (VMSet, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ss.vmssVMCache, err = ss.newVMSSVirtualMachinesCache()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ss, nil
|
||||
}
|
||||
|
||||
@ -139,12 +142,17 @@ func (ss *scaleSet) getVMSS(vmssName string, crt azcache.AzureCacheReadType) (*c
|
||||
return vmss, nil
|
||||
}
|
||||
|
||||
// getVmssVM gets virtualMachineScaleSetVM by nodeName from cache.
|
||||
// It returns cloudprovider.InstanceNotFound if node does not belong to any scale sets.
|
||||
func (ss *scaleSet) getVmssVM(nodeName string, crt azcache.AzureCacheReadType) (string, string, *compute.VirtualMachineScaleSetVM, error) {
|
||||
// getVmssVMByNodeIdentity find virtualMachineScaleSetVM by nodeIdentity, using node's parent VMSS cache.
|
||||
// Returns cloudprovider.InstanceNotFound if the node does not belong to the scale set named in nodeIdentity.
|
||||
func (ss *scaleSet) getVmssVMByNodeIdentity(node *nodeIdentity, crt azcache.AzureCacheReadType) (string, string, *compute.VirtualMachineScaleSetVM, error) {
|
||||
cacheKey, cache, err := ss.getVMSSVMCache(node.resourceGroup, node.vmssName)
|
||||
if err != nil {
|
||||
return "", "", nil, err
|
||||
}
|
||||
|
||||
getter := func(nodeName string, crt azcache.AzureCacheReadType) (string, string, *compute.VirtualMachineScaleSetVM, bool, error) {
|
||||
var found bool
|
||||
cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey, crt)
|
||||
cached, err := cache.Get(cacheKey, crt)
|
||||
if err != nil {
|
||||
return "", "", nil, found, err
|
||||
}
|
||||
@ -159,19 +167,19 @@ func (ss *scaleSet) getVmssVM(nodeName string, crt azcache.AzureCacheReadType) (
|
||||
return "", "", nil, found, nil
|
||||
}
|
||||
|
||||
_, err := getScaleSetVMInstanceID(nodeName)
|
||||
_, err = getScaleSetVMInstanceID(node.nodeName)
|
||||
if err != nil {
|
||||
return "", "", nil, err
|
||||
}
|
||||
|
||||
vmssName, instanceID, vm, found, err := getter(nodeName, crt)
|
||||
vmssName, instanceID, vm, found, err := getter(node.nodeName, crt)
|
||||
if err != nil {
|
||||
return "", "", nil, err
|
||||
}
|
||||
|
||||
if !found {
|
||||
klog.V(2).Infof("Couldn't find VMSS VM with nodeName %s, refreshing the cache", nodeName)
|
||||
vmssName, instanceID, vm, found, err = getter(nodeName, azcache.CacheReadTypeForceRefresh)
|
||||
klog.V(2).Infof("Couldn't find VMSS VM with nodeName %s, refreshing the cache", node.nodeName)
|
||||
vmssName, instanceID, vm, found, err = getter(node.nodeName, azcache.CacheReadTypeForceRefresh)
|
||||
if err != nil {
|
||||
return "", "", nil, err
|
||||
}
|
||||
@ -187,6 +195,17 @@ func (ss *scaleSet) getVmssVM(nodeName string, crt azcache.AzureCacheReadType) (
|
||||
return vmssName, instanceID, vm, nil
|
||||
}
|
||||
|
||||
// getVmssVM gets virtualMachineScaleSetVM by nodeName from cache.
|
||||
// Returns cloudprovider.InstanceNotFound if nodeName does not belong to any scale set.
|
||||
func (ss *scaleSet) getVmssVM(nodeName string, crt azcache.AzureCacheReadType) (string, string, *compute.VirtualMachineScaleSetVM, error) {
|
||||
node, err := ss.getNodeIdentityByNodeName(nodeName, crt)
|
||||
if err != nil {
|
||||
return "", "", nil, err
|
||||
}
|
||||
|
||||
return ss.getVmssVMByNodeIdentity(node, crt)
|
||||
}
|
||||
|
||||
// GetPowerStatusByNodeName returns the power state of the specified node.
|
||||
func (ss *scaleSet) GetPowerStatusByNodeName(name string) (powerState string, err error) {
|
||||
managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, azcache.CacheReadTypeUnsafe)
|
||||
@ -222,8 +241,13 @@ func (ss *scaleSet) GetPowerStatusByNodeName(name string) (powerState string, er
// getCachedVirtualMachineByInstanceID gets scaleSetVMInfo from cache.
// The node must belong to one of scale sets.
func (ss *scaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID string, crt azcache.AzureCacheReadType) (*compute.VirtualMachineScaleSetVM, error) {
cacheKey, cache, err := ss.getVMSSVMCache(resourceGroup, scaleSetName)
if err != nil {
return nil, err
}

getter := func(crt azcache.AzureCacheReadType) (vm *compute.VirtualMachineScaleSetVM, found bool, err error) {
cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey, crt)
cached, err := cache.Get(cacheKey, crt)
if err != nil {
return nil, false, err
}
@ -259,6 +283,13 @@ func (ss *scaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceI
if found && vm != nil {
return vm, nil
}
if found && vm == nil {
klog.V(2).Infof("Couldn't find VMSS VM with scaleSetName %q and instanceID %q, refreshing the cache if it is expired", scaleSetName, instanceID)
vm, found, err = getter(azcache.CacheReadTypeDefault)
if err != nil {
return nil, err
}
}
if !found || vm == nil {
return nil, cloudprovider.InstanceNotFound
}
@ -560,29 +591,64 @@ func extractResourceGroupByProviderID(providerID string) (string, error) {
return matches[1], nil
}

// listScaleSets lists all scale sets.
func (ss *scaleSet) listScaleSets(resourceGroup string) ([]string, error) {
ctx, cancel := getContextWithCancel()
defer cancel()

allScaleSets, rerr := ss.VirtualMachineScaleSetsClient.List(ctx, resourceGroup)
if rerr != nil {
klog.Errorf("VirtualMachineScaleSetsClient.List failed: %v", rerr)
return nil, rerr.Error()
}

ssNames := make([]string, 0)
for _, vmss := range allScaleSets {
name := *vmss.Name
if vmss.Sku != nil && to.Int64(vmss.Sku.Capacity) == 0 {
klog.V(3).Infof("Capacity of VMSS %q is 0, skipping", name)
continue
// getNodeIdentityByNodeName uses the VMSS cache to find a node's resource group and VMSS, returned in a nodeIdentity.
func (ss *scaleSet) getNodeIdentityByNodeName(nodeName string, crt azcache.AzureCacheReadType) (*nodeIdentity, error) {
getter := func(nodeName string, crt azcache.AzureCacheReadType) (*nodeIdentity, error) {
node := &nodeIdentity{
nodeName: nodeName,
}

ssNames = append(ssNames, name)
cached, err := ss.vmssCache.Get(vmssKey, crt)
if err != nil {
return nil, err
}

vmsses := cached.(*sync.Map)
vmsses.Range(func(key, value interface{}) bool {
v := value.(*vmssEntry)
if v.vmss.Name == nil {
return true
}

vmssPrefix := *v.vmss.Name
if v.vmss.VirtualMachineProfile != nil &&
v.vmss.VirtualMachineProfile.OsProfile != nil &&
v.vmss.VirtualMachineProfile.OsProfile.ComputerNamePrefix != nil {
vmssPrefix = *v.vmss.VirtualMachineProfile.OsProfile.ComputerNamePrefix
}

if strings.EqualFold(vmssPrefix, nodeName[:len(nodeName)-6]) {
node.vmssName = *v.vmss.Name
node.resourceGroup = v.resourceGroup
return false
}

return true
})
return node, nil
}

return ssNames, nil
if _, err := getScaleSetVMInstanceID(nodeName); err != nil {
return nil, err
}

node, err := getter(nodeName, crt)
if err != nil {
return nil, err
}
if node.vmssName != "" {
return node, nil
}

klog.V(2).Infof("Couldn't find VMSS for node %s, refreshing the cache", nodeName)
node, err = getter(nodeName, azcache.CacheReadTypeForceRefresh)
if err != nil {
return nil, err
}
if node.vmssName == "" {
return nil, cloudprovider.InstanceNotFound
}
return node, nil
}

// listScaleSetVMs lists VMs belonging to the specified scale set.
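The new getNodeIdentityByNodeName above resolves a node to its scale set without any ARM call, relying on the fact that VMSS instances are named with the scale set's computer name prefix followed by a six-character suffix. A small sketch of that matching idea, assuming a plain map in place of the vmssCache; nodeIdentity here mirrors the struct in the hunk, while matchNodeToVMSS and the sample names are hypothetical.

package main

import (
    "fmt"
    "strings"
)

// nodeIdentity mirrors the struct introduced in the hunk above: the resource
// group and scale set a node belongs to, resolved purely from cached data.
type nodeIdentity struct {
    resourceGroup string
    vmssName      string
    nodeName      string
}

// matchNodeToVMSS strips the six-character instance suffix from the node name
// and compares the remainder case-insensitively against each scale set's
// computer name prefix. The prefixes map stands in for the vmssCache.
func matchNodeToVMSS(nodeName string, prefixes map[string]nodeIdentity) (*nodeIdentity, bool) {
    if len(nodeName) <= 6 {
        return nil, false
    }
    base := nodeName[:len(nodeName)-6]
    for prefix, id := range prefixes {
        if strings.EqualFold(prefix, base) {
            id.nodeName = nodeName
            return &id, true
        }
    }
    return nil, false
}

func main() {
    prefixes := map[string]nodeIdentity{
        "k3s-agent": {resourceGroup: "rg-1", vmssName: "k3s-agent-vmss"},
    }
    if id, ok := matchNodeToVMSS("k3s-agent000001", prefixes); ok {
        fmt.Printf("%s belongs to %s in %s\n", id.nodeName, id.vmssName, id.resourceGroup)
    }
}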
@ -949,6 +1015,12 @@ func (ss *scaleSet) ensureVMSSInPool(service *v1.Service, nodes []*v1.Node, back
if ss.excludeMasterNodesFromStandardLB() && isMasterNode(node) {
continue
}

if ss.ShouldNodeExcludedFromLoadBalancer(node) {
klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", node.Name)
continue
}

// in this scenario the vmSetName is an empty string and the name of vmss should be obtained from the provider IDs of nodes
resourceGroupName, vmssName, err := getVmssAndResourceGroupNameByVMProviderID(node.Spec.ProviderID)
if err != nil {
197 vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go generated vendored
@ -20,6 +20,7 @@ package azure

import (
"context"
"fmt"
"strings"
"sync"
"time"
@ -36,7 +37,6 @@ var (
vmssNameSeparator = "_"

vmssKey = "k8svmssKey"
vmssVirtualMachinesKey = "k8svmssVirtualMachinesKey"
availabilitySetNodesKey = "k8sAvailabilitySetNodesKey"

availabilitySetNodesCacheTTLDefaultInSeconds = 900
@ -53,8 +53,9 @@ type vmssVirtualMachinesEntry struct {
}

type vmssEntry struct {
vmss *compute.VirtualMachineScaleSet
lastUpdate time.Time
vmss *compute.VirtualMachineScaleSet
resourceGroup string
lastUpdate time.Time
}

func (ss *scaleSet) newVMSSCache() (*azcache.TimedCache, error) {
@ -80,8 +81,9 @@ func (ss *scaleSet) newVMSSCache() (*azcache.TimedCache, error) {
continue
}
localCache.Store(*scaleSet.Name, &vmssEntry{
vmss: &scaleSet,
lastUpdate: time.Now().UTC(),
vmss: &scaleSet,
resourceGroup: resourceGroup,
lastUpdate: time.Now().UTC(),
})
}
}
@ -109,15 +111,63 @@ func extractVmssVMName(name string) (string, string, error) {
return ssName, instanceID, nil
}

func (ss *scaleSet) newVMSSVirtualMachinesCache() (*azcache.TimedCache, error) {
// getVMSSVMCache returns an *azcache.TimedCache and cache key for a VMSS (creating that cache if new).
func (ss *scaleSet) getVMSSVMCache(resourceGroup, vmssName string) (string, *azcache.TimedCache, error) {
cacheKey := strings.ToLower(fmt.Sprintf("%s/%s", resourceGroup, vmssName))
if entry, ok := ss.vmssVMCache.Load(cacheKey); ok {
cache := entry.(*azcache.TimedCache)
return cacheKey, cache, nil
}

cache, err := ss.newVMSSVirtualMachinesCache(resourceGroup, vmssName, cacheKey)
if err != nil {
return "", nil, err
}
ss.vmssVMCache.Store(cacheKey, cache)
return cacheKey, cache, nil
}

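getVMSSVMCache above replaces the single shared VM cache with one timed cache per scale set, keyed by the lower-cased resource group and VMSS name and created lazily on first use. A rough sketch of the keying and lazy-creation idea, with a toy vmCache type standing in for *azcache.TimedCache; getOrCreateVMSSCache and perVMSSCaches are illustrative names, and the vendored helper additionally returns an error.

package main

import (
    "fmt"
    "strings"
    "sync"
)

// vmCache stands in for *azcache.TimedCache in this sketch; the real code
// stores one timed cache of VMs per scale set.
type vmCache struct {
    resourceGroup, vmssName string
}

var perVMSSCaches sync.Map // cacheKey -> *vmCache

// getOrCreateVMSSCache builds the lower-cased "<resourceGroup>/<vmssName>" key
// and creates the per-VMSS cache lazily the first time the key is seen.
func getOrCreateVMSSCache(resourceGroup, vmssName string) (string, *vmCache) {
    cacheKey := strings.ToLower(fmt.Sprintf("%s/%s", resourceGroup, vmssName))
    if entry, ok := perVMSSCaches.Load(cacheKey); ok {
        return cacheKey, entry.(*vmCache)
    }
    cache := &vmCache{resourceGroup: resourceGroup, vmssName: vmssName}
    // LoadOrStore keeps the first cache if two goroutines race on creation.
    actual, _ := perVMSSCaches.LoadOrStore(cacheKey, cache)
    return cacheKey, actual.(*vmCache)
}

func main() {
    key, cache := getOrCreateVMSSCache("RG-1", "K3s-Agent-VMSS")
    fmt.Println(key, cache.vmssName)
}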
// gcVMSSVMCache deletes stale VMSS VM caches from deleted VMSSes.
func (ss *scaleSet) gcVMSSVMCache() error {
cached, err := ss.vmssCache.Get(vmssKey, azcache.CacheReadTypeUnsafe)
if err != nil {
return err
}

vmsses := cached.(*sync.Map)
removed := map[string]bool{}
ss.vmssVMCache.Range(func(key, value interface{}) bool {
cacheKey := key.(string)
vlistIdx := cacheKey[strings.LastIndex(cacheKey, "/")+1:]
if _, ok := vmsses.Load(vlistIdx); !ok {
removed[cacheKey] = true
}
return true
})

for key := range removed {
ss.vmssVMCache.Delete(key)
}

return nil
}

// newVMSSVirtualMachinesCache instantiates a new VMs cache for VMs belonging to the provided VMSS.
func (ss *scaleSet) newVMSSVirtualMachinesCache(resourceGroupName, vmssName, cacheKey string) (*azcache.TimedCache, error) {
if ss.Config.VmssVirtualMachinesCacheTTLInSeconds == 0 {
ss.Config.VmssVirtualMachinesCacheTTLInSeconds = vmssVirtualMachinesCacheTTLDefaultInSeconds
}
vmssVirtualMachinesCacheTTL := time.Duration(ss.Config.VmssVirtualMachinesCacheTTLInSeconds) * time.Second

getter := func(key string) (interface{}, error) {
localCache := &sync.Map{} // [nodeName]*vmssVirtualMachinesEntry

oldCache := make(map[string]vmssVirtualMachinesEntry)

if ss.vmssVMCache != nil {
if vmssCache, ok := ss.vmssVMCache.Load(cacheKey); ok {
// get old cache before refreshing the cache
entry, exists, err := ss.vmssVMCache.Store.GetByKey(vmssVirtualMachinesKey)
cache := vmssCache.(*azcache.TimedCache)
entry, exists, err := cache.Store.GetByKey(cacheKey)
if err != nil {
return nil, err
}
@ -133,97 +183,94 @@ func (ss *scaleSet) newVMSSVirtualMachinesCache() (*azcache.TimedCache, error) {
}
}

allResourceGroups, err := ss.GetResourceGroups()
vms, err := ss.listScaleSetVMs(vmssName, resourceGroupName)
if err != nil {
return nil, err
}

for _, resourceGroup := range allResourceGroups.List() {
scaleSetNames, err := ss.listScaleSets(resourceGroup)
if err != nil {
return nil, err
for i := range vms {
vm := vms[i]
if vm.OsProfile == nil || vm.OsProfile.ComputerName == nil {
klog.Warningf("failed to get computerName for vmssVM (%q)", vmssName)
continue
}

for _, ssName := range scaleSetNames {
vms, err := ss.listScaleSetVMs(ssName, resourceGroup)
if err != nil {
return nil, err
}
computerName := strings.ToLower(*vm.OsProfile.ComputerName)
vmssVMCacheEntry := &vmssVirtualMachinesEntry{
resourceGroup: resourceGroupName,
vmssName: vmssName,
instanceID: to.String(vm.InstanceID),
virtualMachine: &vm,
lastUpdate: time.Now().UTC(),
}
// set cache entry to nil when the VM is under deleting.
if vm.VirtualMachineScaleSetVMProperties != nil &&
strings.EqualFold(to.String(vm.VirtualMachineScaleSetVMProperties.ProvisioningState), string(compute.ProvisioningStateDeleting)) {
klog.V(4).Infof("VMSS virtualMachine %q is under deleting, setting its cache to nil", computerName)
vmssVMCacheEntry.virtualMachine = nil
}
localCache.Store(computerName, vmssVMCacheEntry)

for i := range vms {
vm := vms[i]
if vm.OsProfile == nil || vm.OsProfile.ComputerName == nil {
klog.Warningf("failed to get computerName for vmssVM (%q)", ssName)
continue
}
delete(oldCache, computerName)
}

computerName := strings.ToLower(*vm.OsProfile.ComputerName)
vmssVMCacheEntry := &vmssVirtualMachinesEntry{
resourceGroup: resourceGroup,
vmssName: ssName,
instanceID: to.String(vm.InstanceID),
virtualMachine: &vm,
lastUpdate: time.Now().UTC(),
}
// set cache entry to nil when the VM is under deleting.
if vm.VirtualMachineScaleSetVMProperties != nil &&
strings.EqualFold(to.String(vm.VirtualMachineScaleSetVMProperties.ProvisioningState), string(compute.ProvisioningStateDeleting)) {
klog.V(4).Infof("VMSS virtualMachine %q is under deleting, setting its cache to nil", computerName)
vmssVMCacheEntry.virtualMachine = nil
}
localCache.Store(computerName, vmssVMCacheEntry)

if _, exists := oldCache[computerName]; exists {
delete(oldCache, computerName)
}
}
// add old missing cache data with nil entries to prevent aggressive
// ARM calls during cache invalidation
for name, vmEntry := range oldCache {
// if the nil cache entry has existed for vmssVirtualMachinesCacheTTL in the cache
// then it should not be added back to the cache
if vmEntry.virtualMachine == nil && time.Since(vmEntry.lastUpdate) > vmssVirtualMachinesCacheTTL {
klog.V(5).Infof("ignoring expired entries from old cache for %s", name)
continue
}
lastUpdate := time.Now().UTC()
if vmEntry.virtualMachine == nil {
// if this is already a nil entry then keep the time the nil
// entry was first created, so we can cleanup unwanted entries
lastUpdate = vmEntry.lastUpdate
}

// add old missing cache data with nil entries to prevent aggressive
// ARM calls during cache invalidation
for name, vmEntry := range oldCache {
// if the nil cache entry has existed for 15 minutes in the cache
// then it should not be added back to the cache
if vmEntry.virtualMachine == nil || time.Since(vmEntry.lastUpdate) > 15*time.Minute {
klog.V(5).Infof("ignoring expired entries from old cache for %s", name)
continue
}
lastUpdate := time.Now().UTC()
if vmEntry.virtualMachine == nil {
// if this is already a nil entry then keep the time the nil
// entry was first created, so we can cleanup unwanted entries
lastUpdate = vmEntry.lastUpdate
}

klog.V(5).Infof("adding old entries to new cache for %s", name)
localCache.Store(name, &vmssVirtualMachinesEntry{
resourceGroup: vmEntry.resourceGroup,
vmssName: vmEntry.vmssName,
instanceID: vmEntry.instanceID,
virtualMachine: nil,
lastUpdate: lastUpdate,
})
}
klog.V(5).Infof("adding old entries to new cache for %s", name)
localCache.Store(name, &vmssVirtualMachinesEntry{
resourceGroup: vmEntry.resourceGroup,
vmssName: vmEntry.vmssName,
instanceID: vmEntry.instanceID,
virtualMachine: nil,
lastUpdate: lastUpdate,
})
}

return localCache, nil
}

if ss.Config.VmssVirtualMachinesCacheTTLInSeconds == 0 {
ss.Config.VmssVirtualMachinesCacheTTLInSeconds = vmssVirtualMachinesCacheTTLDefaultInSeconds
}
return azcache.NewTimedcache(time.Duration(ss.Config.VmssVirtualMachinesCacheTTLInSeconds)*time.Second, getter)
return azcache.NewTimedcache(vmssVirtualMachinesCacheTTL, getter)
}

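The tail of the getter above carries forward entries that vanished from the fresh VM list as nil placeholders, so a burst of lookups right after invalidation does not turn into a burst of ARM calls, while placeholders older than the cache TTL are dropped for good. A compact sketch of that merge rule under those assumptions; vmEntry and mergeOldEntries are illustrative names, not the vendored types.

package main

import (
    "fmt"
    "time"
)

// vmEntry is a cut-down stand-in for vmssVirtualMachinesEntry: a nil VM
// pointer marks a node we know about but have no fresh data for.
type vmEntry struct {
    vm         *string
    lastUpdate time.Time
}

// mergeOldEntries re-adds nodes missing from the fresh map as nil entries,
// keeping the original timestamp of existing placeholders so that ones older
// than ttl are allowed to expire instead of being resurrected forever.
func mergeOldEntries(fresh map[string]vmEntry, old map[string]vmEntry, ttl time.Duration) {
    for name, entry := range old {
        if _, ok := fresh[name]; ok {
            continue // still present, fresh data wins
        }
        if entry.vm == nil && time.Since(entry.lastUpdate) > ttl {
            continue // stale placeholder, let it expire
        }
        lastUpdate := time.Now().UTC()
        if entry.vm == nil {
            lastUpdate = entry.lastUpdate // keep the original placeholder age
        }
        fresh[name] = vmEntry{vm: nil, lastUpdate: lastUpdate}
    }
}

func main() {
    fresh := map[string]vmEntry{"node-a": {lastUpdate: time.Now()}}
    old := map[string]vmEntry{
        "node-a": {lastUpdate: time.Now().Add(-time.Minute)},
        "node-b": {lastUpdate: time.Now().Add(-time.Minute)},
    }
    mergeOldEntries(fresh, old, 10*time.Minute)
    fmt.Println(len(fresh)) // node-b carried forward as a nil placeholder
}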
func (ss *scaleSet) deleteCacheForNode(nodeName string) error {
cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey, azcache.CacheReadTypeUnsafe)
node, err := ss.getNodeIdentityByNodeName(nodeName, azcache.CacheReadTypeUnsafe)
if err != nil {
klog.Errorf("deleteCacheForNode(%s) failed with error: %v", nodeName, err)
return err
}

virtualMachines := cached.(*sync.Map)
cacheKey, timedcache, err := ss.getVMSSVMCache(node.resourceGroup, node.vmssName)
if err != nil {
klog.Errorf("deleteCacheForNode(%s) failed with error: %v", nodeName, err)
return err
}

vmcache, err := timedcache.Get(cacheKey, azcache.CacheReadTypeUnsafe)
if err != nil {
klog.Errorf("deleteCacheForNode(%s) failed with error: %v", nodeName, err)
return err
}
virtualMachines := vmcache.(*sync.Map)
virtualMachines.Delete(nodeName)

if err := ss.gcVMSSVMCache(); err != nil {
klog.Errorf("deleteCacheForNode(%s) failed to gc stale vmss caches: %v", nodeName, err)
}

return nil
}

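deleteCacheForNode now goes through four steps: resolve the node's identity, locate the per-VMSS cache, delete the node's entry, then garbage-collect caches for scale sets that no longer exist. A small sketch wiring those steps together with toy in-memory stand-ins; identity, resolve, getCache and gc are hypothetical, not the vendored helpers.

package main

import (
    "fmt"
    "sync"
)

// identity is a stand-in for nodeIdentity from the hunks above.
type identity struct{ resourceGroup, vmssName string }

// deleteCacheForNode strings the same four steps together against injected
// helpers so the flow is easy to follow in isolation.
func deleteCacheForNode(
    nodeName string,
    resolve func(string) (identity, error),
    getCache func(identity) (*sync.Map, error),
    gc func() error,
) error {
    node, err := resolve(nodeName) // 1. which resource group / VMSS owns the node
    if err != nil {
        return err
    }
    vms, err := getCache(node) // 2. the per-VMSS VM cache for that scale set
    if err != nil {
        return err
    }
    vms.Delete(nodeName) // 3. drop just this node's entry
    return gc()          // 4. garbage-collect caches of deleted scale sets
}

func main() {
    vms := &sync.Map{}
    vms.Store("node-a", struct{}{})
    err := deleteCacheForNode("node-a",
        func(string) (identity, error) { return identity{"rg-1", "vmss-1"}, nil },
        func(identity) (*sync.Map, error) { return vms, nil },
        func() error { return nil },
    )
    fmt.Println(err)
}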
5 vendor/k8s.io/legacy-cloud-providers/gce/gce_zones.go generated vendored
@ -20,6 +20,7 @@ package gce

import (
"context"
"fmt"
"strings"

compute "google.golang.org/api/compute/v1"
@ -79,7 +80,9 @@ func (g *Cloud) ListZonesInRegion(region string) ([]*compute.Zone, error) {
defer cancel()

mc := newZonesMetricContext("list", region)
list, err := g.c.Zones().List(ctx, filter.Regexp("region", g.getRegionLink(region)))
// Use regex match instead of an exact regional link constructed from getRegionalLink below.
// See comments in issue kubernetes/kubernetes#87905
list, err := g.c.Zones().List(ctx, filter.Regexp("region", fmt.Sprintf(".*/regions/%s", region)))
if err != nil {
return nil, mc.Observe(err)
}
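The GCE change above swaps an exact regional self-link for the looser pattern ".*/regions/<region>", so zones are matched regardless of which API host prefix the zone's region URL carries. A tiny sketch of that filter; the sample self-links are illustrative only.

package main

import (
    "fmt"
    "regexp"
)

func main() {
    region := "us-central1"
    // Same pattern shape as the new filter.Regexp argument in the hunk above.
    pattern := regexp.MustCompile(fmt.Sprintf(".*/regions/%s", region))

    zoneRegions := []string{
        "https://www.googleapis.com/compute/v1/projects/p/regions/us-central1",
        "https://container.googleapis.com/compute/v1/projects/p/regions/us-central1",
        "https://www.googleapis.com/compute/v1/projects/p/regions/europe-west1",
    }
    for _, r := range zoneRegions {
        fmt.Println(r, pattern.MatchString(r))
    }
}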
42 vendor/modules.txt vendored
@ -1223,7 +1223,7 @@ gopkg.in/square/go-jose.v2/jwt
gopkg.in/warnings.v0
# gopkg.in/yaml.v2 v2.3.0
gopkg.in/yaml.v2
# k8s.io/api v0.18.5 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.6-k3s1
# k8s.io/api v0.18.5 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.8-k3s1
k8s.io/api/admission/v1
k8s.io/api/admission/v1beta1
k8s.io/api/admissionregistration/v1
@ -1267,7 +1267,7 @@ k8s.io/api/settings/v1alpha1
k8s.io/api/storage/v1
k8s.io/api/storage/v1alpha1
k8s.io/api/storage/v1beta1
# k8s.io/apiextensions-apiserver v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.6-k3s1
# k8s.io/apiextensions-apiserver v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.8-k3s1
k8s.io/apiextensions-apiserver/pkg/apihelpers
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install
@ -1307,7 +1307,7 @@ k8s.io/apiextensions-apiserver/pkg/generated/openapi
k8s.io/apiextensions-apiserver/pkg/registry/customresource
k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor
k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition
# k8s.io/apimachinery v0.18.5 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.6-k3s1
# k8s.io/apimachinery v0.18.5 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.8-k3s1
k8s.io/apimachinery/pkg/api/equality
k8s.io/apimachinery/pkg/api/errors
k8s.io/apimachinery/pkg/api/meta
@ -1369,7 +1369,7 @@ k8s.io/apimachinery/pkg/watch
k8s.io/apimachinery/third_party/forked/golang/json
k8s.io/apimachinery/third_party/forked/golang/netutil
k8s.io/apimachinery/third_party/forked/golang/reflect
# k8s.io/apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.6-k3s1
# k8s.io/apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.8-k3s1
k8s.io/apiserver/pkg/admission
k8s.io/apiserver/pkg/admission/configuration
k8s.io/apiserver/pkg/admission/initializer
@ -1499,7 +1499,7 @@ k8s.io/apiserver/plugin/pkg/authenticator/request/basicauth
k8s.io/apiserver/plugin/pkg/authenticator/token/oidc
k8s.io/apiserver/plugin/pkg/authenticator/token/webhook
k8s.io/apiserver/plugin/pkg/authorizer/webhook
# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.6-k3s1
# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.8-k3s1
k8s.io/cli-runtime/pkg/genericclioptions
k8s.io/cli-runtime/pkg/kustomize
k8s.io/cli-runtime/pkg/kustomize/k8sdeps
@ -1512,7 +1512,7 @@ k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch
k8s.io/cli-runtime/pkg/kustomize/k8sdeps/validator
k8s.io/cli-runtime/pkg/printers
k8s.io/cli-runtime/pkg/resource
# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.6-k3s1
# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.8-k3s1
k8s.io/client-go/discovery
k8s.io/client-go/discovery/cached
k8s.io/client-go/discovery/cached/disk
@ -1746,7 +1746,7 @@ k8s.io/client-go/util/jsonpath
k8s.io/client-go/util/keyutil
k8s.io/client-go/util/retry
k8s.io/client-go/util/workqueue
# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.6-k3s1
# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.8-k3s1
k8s.io/cloud-provider
k8s.io/cloud-provider/api
k8s.io/cloud-provider/node/helpers
@ -1754,13 +1754,13 @@ k8s.io/cloud-provider/service/helpers
k8s.io/cloud-provider/volume
k8s.io/cloud-provider/volume/errors
k8s.io/cloud-provider/volume/helpers
# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.6-k3s1
# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.8-k3s1
k8s.io/cluster-bootstrap/token/api
k8s.io/cluster-bootstrap/token/jws
k8s.io/cluster-bootstrap/token/util
k8s.io/cluster-bootstrap/util/secrets
k8s.io/cluster-bootstrap/util/tokens
# k8s.io/code-generator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.6-k3s1
# k8s.io/code-generator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.8-k3s1
k8s.io/code-generator/cmd/client-gen/args
k8s.io/code-generator/cmd/client-gen/generators
k8s.io/code-generator/cmd/client-gen/generators/fake
@ -1775,7 +1775,7 @@ k8s.io/code-generator/cmd/lister-gen/args
k8s.io/code-generator/cmd/lister-gen/generators
k8s.io/code-generator/pkg/namer
k8s.io/code-generator/pkg/util
# k8s.io/component-base v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.6-k3s1
# k8s.io/component-base v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.8-k3s1
k8s.io/component-base/cli/flag
k8s.io/component-base/cli/globalflag
k8s.io/component-base/codec
@ -1793,10 +1793,10 @@ k8s.io/component-base/metrics/prometheus/workqueue
k8s.io/component-base/metrics/testutil
k8s.io/component-base/version
k8s.io/component-base/version/verflag
# k8s.io/cri-api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.6-k3s1
# k8s.io/cri-api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.8-k3s1
k8s.io/cri-api/pkg/apis
k8s.io/cri-api/pkg/apis/runtime/v1alpha2
# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.6-k3s1
# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.8-k3s1
k8s.io/csi-translation-lib
k8s.io/csi-translation-lib/plugins
# k8s.io/gengo v0.0.0-20200114144118-36b2048a9120
@ -1811,7 +1811,7 @@ k8s.io/gengo/types
k8s.io/heapster/metrics/api/v1/types
# k8s.io/klog v1.0.0
k8s.io/klog
# k8s.io/kube-aggregator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.6-k3s1
# k8s.io/kube-aggregator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.8-k3s1
k8s.io/kube-aggregator/pkg/apis/apiregistration
k8s.io/kube-aggregator/pkg/apis/apiregistration/install
k8s.io/kube-aggregator/pkg/apis/apiregistration/v1
@ -1839,7 +1839,7 @@ k8s.io/kube-aggregator/pkg/controllers/status
k8s.io/kube-aggregator/pkg/registry/apiservice
k8s.io/kube-aggregator/pkg/registry/apiservice/etcd
k8s.io/kube-aggregator/pkg/registry/apiservice/rest
# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.6-k3s1
# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.8-k3s1
k8s.io/kube-controller-manager/config/v1alpha1
# k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6
k8s.io/kube-openapi/pkg/aggregator
@ -1850,14 +1850,14 @@ k8s.io/kube-openapi/pkg/schemaconv
k8s.io/kube-openapi/pkg/util
k8s.io/kube-openapi/pkg/util/proto
k8s.io/kube-openapi/pkg/util/proto/validation
# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.6-k3s1
# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.8-k3s1
k8s.io/kube-proxy/config/v1alpha1
# k8s.io/kube-scheduler v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.6-k3s1
# k8s.io/kube-scheduler v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.8-k3s1
k8s.io/kube-scheduler/config/v1
k8s.io/kube-scheduler/config/v1alpha1
k8s.io/kube-scheduler/config/v1alpha2
k8s.io/kube-scheduler/extender/v1
# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.6-k3s1
# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.8-k3s1
k8s.io/kubectl/pkg/apps
k8s.io/kubectl/pkg/cmd
k8s.io/kubectl/pkg/cmd/annotate
@ -1932,11 +1932,11 @@ k8s.io/kubectl/pkg/util/storage
k8s.io/kubectl/pkg/util/templates
k8s.io/kubectl/pkg/util/term
k8s.io/kubectl/pkg/validation
# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.6-k3s1
# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.8-k3s1
k8s.io/kubelet/config/v1beta1
k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1
k8s.io/kubelet/pkg/apis/pluginregistration/v1
# k8s.io/kubernetes v1.18.0 => github.com/rancher/kubernetes v1.18.6-k3s1
# k8s.io/kubernetes v1.18.0 => github.com/rancher/kubernetes v1.18.8-k3s1
k8s.io/kubernetes/cmd/cloud-controller-manager/app
k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config
k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config/scheme
@ -2680,7 +2680,7 @@ k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear
k8s.io/kubernetes/third_party/forked/gonum/graph/simple
k8s.io/kubernetes/third_party/forked/gonum/graph/traverse
k8s.io/kubernetes/third_party/forked/ipvs
# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.6-k3s1
# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.8-k3s1
k8s.io/legacy-cloud-providers/aws
k8s.io/legacy-cloud-providers/azure
k8s.io/legacy-cloud-providers/azure/auth
@ -2711,7 +2711,7 @@ k8s.io/legacy-cloud-providers/openstack
k8s.io/legacy-cloud-providers/vsphere
k8s.io/legacy-cloud-providers/vsphere/vclib
k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers
# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.6-k3s1
# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.8-k3s1
k8s.io/metrics/pkg/apis/custom_metrics
k8s.io/metrics/pkg/apis/custom_metrics/v1beta1
k8s.io/metrics/pkg/apis/custom_metrics/v1beta2