Merge pull request #908 from erikwilson/bump-k8s-1.16.2-k3s.1

Upgrade k8s to v1.16.2-k3s.1
Erik Wilson 2019-10-16 17:32:03 -07:00 committed by GitHub
commit ccb1d81fc9
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
50 changed files with 694 additions and 444 deletions

go.mod (52 lines changed)

@ -31,31 +31,31 @@ replace (
github.com/prometheus/common => github.com/prometheus/common v0.0.0-20181126121408-4724e9255275
github.com/prometheus/procfs => github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a
github.com/rancher/kine => github.com/ibuildthecloud/kine v0.1.0
k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.16.0-k3s.1
k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.16.0-k3s.1
k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.16.0-k3s.1
k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.16.0-k3s.1
k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.16.0-k3s.1
k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.16.0-k3s.1
k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.16.0-k3s.1
k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.16.0-k3s.1
k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.16.0-k3s.1
k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.16.0-k3s.1
k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.16.0-k3s.1
k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.16.0-k3s.1
k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.16.0-k3s.1
k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.16.0-k3s.1
k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.16.0-k3s.1
k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.16.0-k3s.1
k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.16.0-k3s.1
k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.16.0-k3s.1
k8s.io/kubernetes => github.com/rancher/kubernetes v1.16.0-k3s.1
k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.16.0-k3s.1
k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.16.0-k3s.1
k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.16.0-k3s.1
k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.16.0-k3s.1
k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.16.0-k3s.1
k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.16.0-k3s.1
k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.16.2-k3s.1
k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.16.2-k3s.1
k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.16.2-k3s.1
k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.16.2-k3s.1
k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.16.2-k3s.1
k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.16.2-k3s.1
k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.16.2-k3s.1
k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.16.2-k3s.1
k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.16.2-k3s.1
k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.16.2-k3s.1
k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.16.2-k3s.1
k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.16.2-k3s.1
k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.16.2-k3s.1
k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.16.2-k3s.1
k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.16.2-k3s.1
k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.16.2-k3s.1
k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.16.2-k3s.1
k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.16.2-k3s.1
k8s.io/kubernetes => github.com/rancher/kubernetes v1.16.2-k3s.1
k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.16.2-k3s.1
k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.16.2-k3s.1
k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.16.2-k3s.1
k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.16.2-k3s.1
k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.16.2-k3s.1
k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.16.2-k3s.1
mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34
)
@ -112,7 +112,7 @@ require (
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3
google.golang.org/grpc v1.23.0
gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 // indirect
gopkg.in/yaml.v2 v2.2.2
gopkg.in/yaml.v2 v2.2.4
k8s.io/api v0.0.0
k8s.io/apimachinery v0.0.0
k8s.io/apiserver v0.0.0

go.sum (88 lines changed)

@ -569,49 +569,49 @@ github.com/rancher/flannel v0.11.0-k3s.1 h1:mIwnfWDafjzQgFkZeJ1AkFrrAT3EdBaA1giE
github.com/rancher/flannel v0.11.0-k3s.1/go.mod h1:Hn4ZV+eq0LhLZP63xZnxdGwXEoRSxs5sxELxu27M3UA=
github.com/rancher/helm-controller v0.2.2 h1:MUqisy53/Ay1EYOF2uTCYBbGpgtZLNKKrI01BdxIbQo=
github.com/rancher/helm-controller v0.2.2/go.mod h1:0JkL0UjxddNbT4FmLoESarD4Mz8xzA5YlejqJ/U4g+8=
github.com/rancher/kubernetes v1.16.0-k3s.1 h1:6ewJ22WFt/3l/7d724XiAXZWbatkzWZ/Q8kMow7Jq4E=
github.com/rancher/kubernetes v1.16.0-k3s.1/go.mod h1:nlP2zevWKRGKuaaVbKIwozU0Rjg9leVDXkL4YTtjmVs=
github.com/rancher/kubernetes/staging/src/k8s.io/api v1.16.0-k3s.1 h1:1YwarEgr4s4U+A/y6Uh/wofC1lQL4ofUpmRbV8KMsKg=
github.com/rancher/kubernetes/staging/src/k8s.io/api v1.16.0-k3s.1/go.mod h1:cHpnPcbNeE90PrTRnTu13OM+FN+ROt82odVbEh++81o=
github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.16.0-k3s.1 h1:c5mbi9EKISNNtZMnkDlyYKXIh/gI5MeenbXZ0N4yUc8=
github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.16.0-k3s.1/go.mod h1:f1tFT2pOqPzfckbG1GjHIzy3G+T2LW7rchcruNoLaiM=
github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.16.0-k3s.1 h1:Fm48lBedfsCszeY4Q2BDg9ccRD5AoLEwlh3NOCMmedk=
github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.16.0-k3s.1/go.mod h1:PNw+FbGH4/s3zK9V3rAeMiHTbQz2CU/yqAkfQ2UgLVs=
github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.16.0-k3s.1 h1:mRAt17ngpONNVQLlTm/iK4JyeDwFGIAc/bD666CmQgY=
github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.16.0-k3s.1/go.mod h1:WmFoxjELD2xtWb77Yj9RPibT5ACkQYEW9lPQtNkGtbE=
github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.16.0-k3s.1 h1:RsguCKnuuJ6A888TmCDE3dBFNMS3PxLVd1E23JlTwho=
github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.16.0-k3s.1/go.mod h1:nBogvbgjMgo7AeVA6CuqVO13LVIfmlQ11t6xzAJdBN8=
github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.16.0-k3s.1 h1:lbaWYcUTfeojZbH4XCHCtoy6k7EITrtbyiUavdYYBdU=
github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.16.0-k3s.1/go.mod h1:GiGfbsjtP4tOW6zgpL8/vCUoyXAV5+9X2onLursPi08=
github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.16.0-k3s.1 h1:3OR35tGENK2ZZNcPH42gf7aVXSQTcEjn0m5yyISYGR4=
github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.16.0-k3s.1/go.mod h1:77Vtl0d5SOrs6vqwqhZZQakDEovGSm2rRqtpTeteqcQ=
github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.16.0-k3s.1 h1:+hJi1dN5Gx/61j0JrPNKabugy2oHuXg0Mh0bYM+Z4DM=
github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.16.0-k3s.1/go.mod h1:Gwev4EWWC1Yfr0gBTJR0n8FYLsIdRu4ARubU6hXRadU=
github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.16.0-k3s.1 h1:wunMwVd7YXw67Cx807U9d6zGT24r2ZuJOX20EzIeEHM=
github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.16.0-k3s.1/go.mod h1:L8deZCu6NpzgKzY91TOGKJ1JtAoHd8WyJ/HdoxqZCGo=
github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.16.0-k3s.1 h1:WcReZtJ0G7/HTrgwmffU3K1P+6TDDW9HCMw7IFAEBug=
github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.16.0-k3s.1/go.mod h1:spPP+vRNS8EsnNNIhFCZTTuRO3XhV1WoF18HJySoZn8=
github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.16.0-k3s.1 h1:UlJ7h0DDRZBIsZVLqoKsie4FHu+JQ5A8mGgcGRwNoxs=
github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.16.0-k3s.1/go.mod h1:FuWtYjatYStosiEepg0w/7/QrG0T/HMh/FA5T/8AIP8=
github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.16.0-k3s.1 h1:T5EF0wAGqg9+QMGuwPMsT6xA8ZDG+GHQ6tXq3xIAmFM=
github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.16.0-k3s.1/go.mod h1:w51XnEBJkmGEjUGylUXL1TezQIc0JYndQCsVkQMHjKA=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.16.0-k3s.1 h1:0VWFVFZZ2VikaQOfmyIIesAiLd8MgKZ8HurOxAonP8Y=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.16.0-k3s.1/go.mod h1:ttKFRQ6/4l0mjLwPJ/Ccn9k/vc/6y5dJ98r88NLLiGw=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.16.0-k3s.1 h1:JGbU2LIo1+DzW6MPfNmJYI8Qn2aLnDv21O7J/ey7xNQ=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.16.0-k3s.1/go.mod h1:Wm4X9LSXr3uszFEajh8M75iyxHdjOKSp0LCL4TIp7UQ=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.16.0-k3s.1 h1:Z1shjJcAUhXKf9LfT0Mba166rxwu0o6CyiHYQcLbwHk=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.16.0-k3s.1/go.mod h1:8btekvQmHgyy4XTchusVAW/mQIPE+hVLn61sZ/epsAA=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.16.0-k3s.1 h1:br7jLG19+bH/iZPesP1gwzudnx1Un1nLHhInNDXmnX8=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.16.0-k3s.1/go.mod h1:sBq5nR6KVpfnkBsj4RjOQhw0j5yOtLHXIX2Dz5uZQmw=
github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.16.0-k3s.1 h1:tZr8sx5kksmXVfIHePJybqbPttx5RvHoCLCerVBOaoI=
github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.16.0-k3s.1/go.mod h1:ellVfoCz8MlDjTnkqsTkU5svJOIjcK3XNx/onmixgDk=
github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.16.0-k3s.1 h1:HJBoiE5HsE0m1xjl/pjMcMrnOQcNNrBcfplXPr9ls4A=
github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.16.0-k3s.1/go.mod h1:4Sbo2Vn3tAIZpwx4YIp+SushTtzzzabVrg9Tq4rrImM=
github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.16.0-k3s.1 h1:F0OrMX+CQPJf8G/Gsf1BjQE/luDCy8KLLhmtl4JUhNM=
github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.16.0-k3s.1/go.mod h1:OpqDei2/Qdg+5YGQYPiEuQ4vlFoiAJy0Ysn8aLKP7Cs=
github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.16.0-k3s.1 h1:HDchQ4clidr6miO5cNGUTCor9X8ByeyACcxXOMk4c/s=
github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.16.0-k3s.1/go.mod h1:vQHTmz0IaEb7/OXPSor1uga8Er0V+2M5aSdXG832NbU=
github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.16.0-k3s.1/go.mod h1:E3i4wscD52Qj6PEcgUjvCd81Tl6Mghk1GHtEzoaaqwU=
github.com/rancher/kubernetes v1.16.2-k3s.1 h1:+oJEecXgQDkEOD/X8z2YUdYVonbXZtGzXsmtKDPYesg=
github.com/rancher/kubernetes v1.16.2-k3s.1/go.mod h1:SmhGgKfQ30imqjFVj8AI+iW+zSyFsswNErKYeTfgoH0=
github.com/rancher/kubernetes/staging/src/k8s.io/api v1.16.2-k3s.1 h1:2kK5KD6MU86txBYKG+tM6j5zbey02DaIDtwpG5JsfnI=
github.com/rancher/kubernetes/staging/src/k8s.io/api v1.16.2-k3s.1/go.mod h1:cHpnPcbNeE90PrTRnTu13OM+FN+ROt82odVbEh++81o=
github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.16.2-k3s.1 h1:vZGFZHN6AmoIRdzj57gIB3oya7pb17wWDHD/ZKB+k68=
github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.16.2-k3s.1/go.mod h1:CfhfFElTQ5DKDOaHZ6/P2QeJM9Dkg9udFx7Vp3wE3eo=
github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.16.2-k3s.1 h1:Iaa5tbsSuMZqvXYwosTs+2fyqkFTwDNOyXmkoe6J1aA=
github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.16.2-k3s.1/go.mod h1:I9gveEHn8RBUsRZ1zR7UbePt/ySCzzxsG1dWwpKNN5E=
github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.16.2-k3s.1 h1:47GCm5H0FV2uWM9w8/x7MAMbty/DgXmvvslrrUB0FZs=
github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.16.2-k3s.1/go.mod h1:tUn8Kq7rjvsqTXw4Ku6HT6lyaUAtz46fVolVnz+DZlw=
github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.16.2-k3s.1 h1:CmCcCfMqu//cm8cTTYwMPV6MDWpWLLDpDdWrGUghvHw=
github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.16.2-k3s.1/go.mod h1:nBogvbgjMgo7AeVA6CuqVO13LVIfmlQ11t6xzAJdBN8=
github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.16.2-k3s.1 h1:zwn4rTNrX7RTlLLE2+shx3X+6LHnub8Zdpoh44Q+FhY=
github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.16.2-k3s.1/go.mod h1:GiGfbsjtP4tOW6zgpL8/vCUoyXAV5+9X2onLursPi08=
github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.16.2-k3s.1 h1:qbNYhEltz9DRXcSOeMjDvvegiLSG8jWdUhkU2D/8KSk=
github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.16.2-k3s.1/go.mod h1:77Vtl0d5SOrs6vqwqhZZQakDEovGSm2rRqtpTeteqcQ=
github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.16.2-k3s.1 h1:efPol+sRfueqsVUj/cUXYaAHqM0WGbLdoqBxxTvWrQY=
github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.16.2-k3s.1/go.mod h1:Gwev4EWWC1Yfr0gBTJR0n8FYLsIdRu4ARubU6hXRadU=
github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.16.2-k3s.1 h1:MfD1MXvfc81W2KEbwobebkgCZNqR4ExD9lajYDorA/A=
github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.16.2-k3s.1/go.mod h1:uzCZz0cC/uXDgpjpMZ7lFzglGU/9tXyTiPDcX92d6OI=
github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.16.2-k3s.1 h1:eA5RmPXBgUTbc82Gch3T1tcuJFP6T4iE7aUdusFXVOY=
github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.16.2-k3s.1/go.mod h1:spPP+vRNS8EsnNNIhFCZTTuRO3XhV1WoF18HJySoZn8=
github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.16.2-k3s.1 h1:rs9F4np5cLbEBew/9jMH5/j651FhSs7KuRZD2eOIKR0=
github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.16.2-k3s.1/go.mod h1:FuWtYjatYStosiEepg0w/7/QrG0T/HMh/FA5T/8AIP8=
github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.16.2-k3s.1 h1:q9cfT/qFOHKIjF9mPauwhmGUfRV8n+U9735Cf5q0bMA=
github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.16.2-k3s.1/go.mod h1:w51XnEBJkmGEjUGylUXL1TezQIc0JYndQCsVkQMHjKA=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.16.2-k3s.1 h1:1uID+qLmE02FmTa9Ory7zKJJSyW23BCQEGTB/24WCoE=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.16.2-k3s.1/go.mod h1:ttKFRQ6/4l0mjLwPJ/Ccn9k/vc/6y5dJ98r88NLLiGw=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.16.2-k3s.1 h1:xFBNpH30wgcJ9lAlXKNQyJmz8YxiMxn8UCbXSEQ3gxQ=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.16.2-k3s.1/go.mod h1:Wm4X9LSXr3uszFEajh8M75iyxHdjOKSp0LCL4TIp7UQ=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.16.2-k3s.1 h1:Xq9l0M8ZK3jikoiVqLGRdLVA4P8QftfQ/lD8395Fuhc=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.16.2-k3s.1/go.mod h1:8btekvQmHgyy4XTchusVAW/mQIPE+hVLn61sZ/epsAA=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.16.2-k3s.1 h1:G+WwhP95psm9fSQ+OHcXEVVLlUSjVoHgD+6nLoEWNuY=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.16.2-k3s.1/go.mod h1:sBq5nR6KVpfnkBsj4RjOQhw0j5yOtLHXIX2Dz5uZQmw=
github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.16.2-k3s.1 h1:OgATrfXxWVmBjmlZtV9oq+G9XddY3051GyI7lhaNKws=
github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.16.2-k3s.1/go.mod h1:b4UoBV6BbZRU3F2VDqLsXqWFeNUhT2EtirOINqUzgOs=
github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.16.2-k3s.1 h1:4hK23wpRnc38rDaapHOk2d6DNOF4GhDdbISUVXSRQR8=
github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.16.2-k3s.1/go.mod h1:4Sbo2Vn3tAIZpwx4YIp+SushTtzzzabVrg9Tq4rrImM=
github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.16.2-k3s.1 h1:8tbJkA/XARMaf3/3Kas4K7MF4uQOUkRmz4aiEgfnBrM=
github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.16.2-k3s.1/go.mod h1:OpqDei2/Qdg+5YGQYPiEuQ4vlFoiAJy0Ysn8aLKP7Cs=
github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.16.2-k3s.1 h1:Br9g854hWBNf1JDWesBZ11dNQxnYtGaWGSLOc6ITXds=
github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.16.2-k3s.1/go.mod h1:vQHTmz0IaEb7/OXPSor1uga8Er0V+2M5aSdXG832NbU=
github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.16.2-k3s.1/go.mod h1:E3i4wscD52Qj6PEcgUjvCd81Tl6Mghk1GHtEzoaaqwU=
github.com/rancher/moq v0.0.0-20190404221404-ee5226d43009 h1:Xsxh7fX3+2wAUJtPy8g2lZh0cYuyifqhBL0vxCIYojs=
github.com/rancher/moq v0.0.0-20190404221404-ee5226d43009/go.mod h1:wpITyDPTi/Na5h73XkbuEf2AP9fbgrIGqqxVzFhYD6U=
github.com/rancher/remotedialer v0.2.0 h1:xD7t3K6JYwTdAsxmGtTHQMkEkFgKouQ1foLxVW424Dc=
@ -897,6 +897,8 @@ gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bl
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gotest.tools v2.1.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=

vendor/gopkg.in/yaml.v2/decode.go (generated/vendored, 38 lines changed)

@ -229,6 +229,10 @@ type decoder struct {
mapType reflect.Type
terrors []string
strict bool
decodeCount int
aliasCount int
aliasDepth int
}
var (
@ -314,7 +318,39 @@ func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unm
return out, false, false
}
const (
// 400,000 decode operations is ~500kb of dense object declarations, or ~5kb of dense object declarations with 10000% alias expansion
alias_ratio_range_low = 400000
// 4,000,000 decode operations is ~5MB of dense object declarations, or ~4.5MB of dense object declarations with 10% alias expansion
alias_ratio_range_high = 4000000
// alias_ratio_range is the range over which we scale allowed alias ratios
alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low)
)
func allowedAliasRatio(decodeCount int) float64 {
switch {
case decodeCount <= alias_ratio_range_low:
// allow 99% to come from alias expansion for small-to-medium documents
return 0.99
case decodeCount >= alias_ratio_range_high:
// allow 10% to come from alias expansion for very large documents
return 0.10
default:
// scale smoothly from 99% down to 10% over the range.
// this maps to 396,000 - 400,000 allowed alias-driven decodes over the range.
// 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps).
return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range)
}
}
func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
d.decodeCount++
if d.aliasDepth > 0 {
d.aliasCount++
}
if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) {
failf("document contains excessive aliasing")
}
switch n.kind {
case documentNode:
return d.document(n, out)
@ -353,7 +389,9 @@ func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
failf("anchor '%s' value contains itself", n.value)
}
d.aliases[n] = true
d.aliasDepth++
good = d.unmarshal(n.alias, out)
d.aliasDepth--
delete(d.aliases, n)
return good
}
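Editor's note on the hunk above: this is yaml.v2 v2.2.4's "billion laughs" hardening. Once a document has produced more than 100 aliases and more than 1000 decode operations, and the share of alias-driven decodes exceeds allowedAliasRatio for the document size, decoding aborts. A minimal sketch, not part of this PR, of the kind of input that now fails fast (the exact error text is an expectation, not quoted from the diff):

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

// Each level references the previous one nine times, so the fully expanded
// document is enormous even though the input is under 1 KB.
const aliasBomb = `
a: &a ["lol","lol","lol","lol","lol","lol","lol","lol","lol"]
b: &b [*a,*a,*a,*a,*a,*a,*a,*a,*a]
c: &c [*b,*b,*b,*b,*b,*b,*b,*b,*b]
d: &d [*c,*c,*c,*c,*c,*c,*c,*c,*c]
e: &e [*d,*d,*d,*d,*d,*d,*d,*d,*d]
f: &f [*e,*e,*e,*e,*e,*e,*e,*e,*e]
g: &g [*f,*f,*f,*f,*f,*f,*f,*f,*f]
h: &h [*g,*g,*g,*g,*g,*g,*g,*g,*g]
i: &i [*h,*h,*h,*h,*h,*h,*h,*h,*h]
`

func main() {
	var out interface{}
	err := yaml.Unmarshal([]byte(aliasBomb), &out)
	// With v2.2.2 this tries to materialize hundreds of millions of strings;
	// with the alias-ratio check it should abort quickly with an
	// "excessive aliasing" style error instead.
	fmt.Println(err)
}
```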

vendor/gopkg.in/yaml.v2/resolve.go (generated/vendored, 2 lines changed)

@ -81,7 +81,7 @@ func resolvableTag(tag string) bool {
return false
}
var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`)
var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)
func resolve(tag string, in string) (rtag string, out interface{}) {
if !resolvableTag(tag) {
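The widened yamlStyleFloat pattern accepts forms such as a trailing dot (`1.`) and an unsigned exponent (`1e3`) that the old expression rejected. A quick standalone comparison of the two patterns from the hunk above, with sample strings chosen for illustration only:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	oldFloat := regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`)
	newFloat := regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)

	for _, s := range []string{"685230.15", ".5", "1.", "1e3", "+12e03"} {
		// "1." and "1e3" only match the new pattern; the rest match both.
		fmt.Printf("%-10s old=%-5v new=%v\n", s, oldFloat.MatchString(s), newFloat.MatchString(s))
	}
}
```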

vendor/gopkg.in/yaml.v2/scannerc.go (generated/vendored, 16 lines changed)

@ -906,6 +906,9 @@ func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
return true
}
// max_flow_level limits the flow_level
const max_flow_level = 10000
// Increase the flow level and resize the simple key list if needed.
func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
// Reset the simple key on the next level.
@ -913,6 +916,11 @@ func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
// Increase the flow level.
parser.flow_level++
if parser.flow_level > max_flow_level {
return yaml_parser_set_scanner_error(parser,
"while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
fmt.Sprintf("exceeded max depth of %d", max_flow_level))
}
return true
}
@ -925,6 +933,9 @@ func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
return true
}
// max_indents limits the indents stack size
const max_indents = 10000
// Push the current indentation level to the stack and set the new level
// the current column is greater than the indentation level. In this case,
// append or insert the specified token into the token queue.
@ -939,6 +950,11 @@ func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml
// indentation level.
parser.indents = append(parser.indents, parser.indent)
parser.indent = column
if len(parser.indents) > max_indents {
return yaml_parser_set_scanner_error(parser,
"while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
fmt.Sprintf("exceeded max depth of %d", max_indents))
}
// Create a token and insert it into the queue.
token := yaml_token_t{
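max_flow_level and max_indents cap how deeply the scanner will nest flow collections and block indents, turning a tiny but pathological input into a parse error rather than unbounded growth. A small sketch of an input the new limit should reject (assumes gopkg.in/yaml.v2 at v2.2.4 or later; the error text is an expectation based on the hunk above):

```go
package main

import (
	"fmt"
	"strings"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	// 20,001 unbalanced '[' tokens: each one raises the scanner's flow level,
	// so this trips max_flow_level long before anything else goes wrong.
	doc := strings.Repeat("[", 20001)

	var out interface{}
	err := yaml.Unmarshal([]byte(doc), &out)
	fmt.Println(err) // expected: a scanner error along the lines of "exceeded max depth of 10000"
}
```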


@ -201,6 +201,7 @@ func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget)
c.GenericConfig.RequestTimeout,
time.Duration(c.GenericConfig.MinRequestTimeout)*time.Second,
apiGroupInfo.StaticOpenAPISpec,
c.GenericConfig.MaxRequestBodyBytes,
)
if err != nil {
return nil, err


@ -125,6 +125,10 @@ type crdHandler struct {
// purpose of managing fields, it is how CR handlers get the structure
// of TypeMeta and ObjectMeta
staticOpenAPISpec *spec.Swagger
// The limit on the request size that would be accepted and decoded in a write request
// 0 means no limit.
maxRequestBodyBytes int64
}
// crdInfo stores enough information to serve the storage for the custom resource
@ -169,7 +173,8 @@ func NewCustomResourceDefinitionHandler(
authorizer authorizer.Authorizer,
requestTimeout time.Duration,
minRequestTimeout time.Duration,
staticOpenAPISpec *spec.Swagger) (*crdHandler, error) {
staticOpenAPISpec *spec.Swagger,
maxRequestBodyBytes int64) (*crdHandler, error) {
ret := &crdHandler{
versionDiscoveryHandler: versionDiscoveryHandler,
groupDiscoveryHandler: groupDiscoveryHandler,
@ -185,6 +190,7 @@ func NewCustomResourceDefinitionHandler(
requestTimeout: requestTimeout,
minRequestTimeout: minRequestTimeout,
staticOpenAPISpec: staticOpenAPISpec,
maxRequestBodyBytes: maxRequestBodyBytes,
}
crdInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: ret.createCustomResourceDefinition,
@ -812,6 +818,8 @@ func (r *crdHandler) getOrCreateServingInfoFor(uid types.UID, name string) (*crd
TableConvertor: storages[v.Name].CustomResource,
Authorizer: r.authorizer,
MaxRequestBodyBytes: r.maxRequestBodyBytes,
}
if utilfeature.DefaultFeatureGate.Enabled(features.ServerSideApply) {
reqScope := *requestScopes[v.Name]


@ -48,6 +48,7 @@ go_test(
srcs = [
"convert_test.go",
"goopenapi_test.go",
"unfold_test.go",
"validation_test.go",
],
embed = [":go_default_library"],


@ -30,7 +30,7 @@ type Structural struct {
Generic
Extensions
*ValueValidation
ValueValidation *ValueValidation
}
// +k8s:deepcopy-gen=true


@ -35,13 +35,16 @@ func (s *Structural) Unfold() *Structural {
return false
}
if s.AnyOf == nil {
s.AnyOf = []NestedValueValidation{
if s.ValueValidation == nil {
s.ValueValidation = &ValueValidation{}
}
if s.ValueValidation.AnyOf == nil {
s.ValueValidation.AnyOf = []NestedValueValidation{
{ForbiddenGenerics: Generic{Type: "integer"}},
{ForbiddenGenerics: Generic{Type: "string"}},
}
} else {
s.AllOf = append([]NestedValueValidation{
s.ValueValidation.AllOf = append([]NestedValueValidation{
{
ValueValidation: ValueValidation{
AnyOf: []NestedValueValidation{
@ -50,7 +53,7 @@ func (s *Structural) Unfold() *Structural {
},
},
},
}, s.AllOf...)
}, s.ValueValidation.AllOf...)
}
return true


@ -9,6 +9,7 @@ load(
go_test(
name = "go_default_test",
srcs = [
"json_limit_test.go",
"json_test.go",
"meta_test.go",
],
@ -17,6 +18,7 @@ go_test(
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/json:go_default_library",
],
)


@ -122,7 +122,27 @@ func (customNumberDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
}
iter.ReportError("DecodeNumber", err.Error())
default:
// init depth, if needed
if iter.Attachment == nil {
iter.Attachment = int(1)
}
// remember current depth
originalAttachment := iter.Attachment
// increment depth before descending
if i, ok := iter.Attachment.(int); ok {
iter.Attachment = i + 1
if i > 10000 {
iter.ReportError("parse", "exceeded max depth")
return
}
}
*(*interface{})(ptr) = iter.Read()
// restore current depth
iter.Attachment = originalAttachment
}
}


@ -1,9 +1,6 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
@ -29,3 +26,14 @@ filegroup(
srcs = [":package-srcs"],
tags = ["automanaged"],
)
go_test(
name = "go_default_test",
srcs = ["yaml_test.go"],
data = glob(["testdata/**"]),
embed = [":go_default_library"],
deps = [
"//staging/src/k8s.io/apimachinery/pkg/util/yaml:go_default_library",
"//vendor/sigs.k8s.io/yaml:go_default_library",
],
)


@ -19,6 +19,7 @@ package json
import (
"bytes"
"encoding/json"
"fmt"
"io"
)
@ -34,6 +35,9 @@ func Marshal(v interface{}) ([]byte, error) {
return json.Marshal(v)
}
// limit recursive depth to prevent stack overflow errors
const maxDepth = 10000
// Unmarshal unmarshals the given data
// If v is a *map[string]interface{}, numbers are converted to int64 or float64
func Unmarshal(data []byte, v interface{}) error {
@ -48,7 +52,7 @@ func Unmarshal(data []byte, v interface{}) error {
return err
}
// If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
return convertMapNumbers(*v)
return convertMapNumbers(*v, 0)
case *[]interface{}:
// Build a decoder from the given data
@ -60,7 +64,7 @@ func Unmarshal(data []byte, v interface{}) error {
return err
}
// If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
return convertSliceNumbers(*v)
return convertSliceNumbers(*v, 0)
default:
return json.Unmarshal(data, v)
@ -69,16 +73,20 @@ func Unmarshal(data []byte, v interface{}) error {
// convertMapNumbers traverses the map, converting any json.Number values to int64 or float64.
// values which are map[string]interface{} or []interface{} are recursively visited
func convertMapNumbers(m map[string]interface{}) error {
func convertMapNumbers(m map[string]interface{}, depth int) error {
if depth > maxDepth {
return fmt.Errorf("exceeded max depth of %d", maxDepth)
}
var err error
for k, v := range m {
switch v := v.(type) {
case json.Number:
m[k], err = convertNumber(v)
case map[string]interface{}:
err = convertMapNumbers(v)
err = convertMapNumbers(v, depth+1)
case []interface{}:
err = convertSliceNumbers(v)
err = convertSliceNumbers(v, depth+1)
}
if err != nil {
return err
@ -89,16 +97,20 @@ func convertMapNumbers(m map[string]interface{}) error {
// convertSliceNumbers traverses the slice, converting any json.Number values to int64 or float64.
// values which are map[string]interface{} or []interface{} are recursively visited
func convertSliceNumbers(s []interface{}) error {
func convertSliceNumbers(s []interface{}, depth int) error {
if depth > maxDepth {
return fmt.Errorf("exceeded max depth of %d", maxDepth)
}
var err error
for i, v := range s {
switch v := v.(type) {
case json.Number:
s[i], err = convertNumber(v)
case map[string]interface{}:
err = convertMapNumbers(v)
err = convertMapNumbers(v, depth+1)
case []interface{}:
err = convertSliceNumbers(v)
err = convertSliceNumbers(v, depth+1)
}
if err != nil {
return err
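The added depth parameter means convertMapNumbers and convertSliceNumbers now return an error instead of recursing without bound. A short sketch of how that surfaces to callers of this package (illustrative; the alias utiljson and the exact error text are assumptions based on the hunk above):

```go
package main

import (
	"fmt"
	"strings"

	utiljson "k8s.io/apimachinery/pkg/util/json"
)

func main() {
	// 11,000 nested arrays decode fine, but the number-conversion pass now
	// refuses to recurse past maxDepth (10000) and returns an error instead.
	doc := strings.Repeat("[", 11000) + "0" + strings.Repeat("]", 11000)

	var out []interface{}
	err := utiljson.Unmarshal([]byte(doc), &out)
	fmt.Println(err) // expected: "exceeded max depth of 10000"
}
```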


@ -190,11 +190,11 @@ func (f *FieldManager) Apply(liveObj runtime.Object, patch []byte, fieldManager
patchObj := &unstructured.Unstructured{Object: map[string]interface{}{}}
if err := yaml.Unmarshal(patch, &patchObj.Object); err != nil {
return nil, fmt.Errorf("error decoding YAML: %v", err)
return nil, errors.NewBadRequest(fmt.Sprintf("error decoding YAML: %v", err))
}
if patchObj.GetManagedFields() != nil {
return nil, fmt.Errorf("managed fields must be nil but was %v", patchObj.GetManagedFields())
return nil, errors.NewBadRequest(fmt.Sprintf("metadata.managedFields must be nil"))
}
if patchObj.GetAPIVersion() != f.groupVersion.String() {


@ -337,6 +337,15 @@ func (p *jsonPatcher) createNewObject() (runtime.Object, error) {
func (p *jsonPatcher) applyJSPatch(versionedJS []byte) (patchedJS []byte, retErr error) {
switch p.patchType {
case types.JSONPatchType:
// sanity check potentially abusive patches
// TODO(liggitt): drop this once golang json parser limits stack depth (https://github.com/golang/go/issues/31789)
if len(p.patchBytes) > 1024*1024 {
v := []interface{}{}
if err := json.Unmarshal(p.patchBytes, &v); err != nil {
return nil, errors.NewBadRequest(fmt.Sprintf("error decoding patch: %v", err))
}
}
patchObj, err := jsonpatch.DecodePatch(p.patchBytes)
if err != nil {
return nil, errors.NewBadRequest(err.Error())
@ -352,6 +361,15 @@ func (p *jsonPatcher) applyJSPatch(versionedJS []byte) (patchedJS []byte, retErr
}
return patchedJS, nil
case types.MergePatchType:
// sanity check potentially abusive patches
// TODO(liggitt): drop this once golang json parser limits stack depth (https://github.com/golang/go/issues/31789)
if len(p.patchBytes) > 1024*1024 {
v := map[string]interface{}{}
if err := json.Unmarshal(p.patchBytes, &v); err != nil {
return nil, errors.NewBadRequest(fmt.Sprintf("error decoding patch: %v", err))
}
}
return jsonpatch.MergePatch(versionedJS, p.patchBytes)
default:
// only here as a safety net - go-restful filters content-type


@ -180,7 +180,7 @@ type Config struct {
// patch may cause.
// This affects all places that applies json patch in the binary.
JSONPatchMaxCopyBytes int64
// The limit on the request body size that would be accepted and decoded in a write request.
// The limit on the request size that would be accepted and decoded in a write request
// 0 means no limit.
MaxRequestBodyBytes int64
// MaxRequestsInFlight is the maximum number of parallel non-long-running requests. Every further
@ -297,22 +297,20 @@ func NewConfig(codecs serializer.CodecFactory) *Config {
MinRequestTimeout: 1800,
LivezGracePeriod: time.Duration(0),
ShutdownDelayDuration: time.Duration(0),
// 10MB is the recommended maximum client request size in bytes
// 1.5MB is the default client request size in bytes
// the etcd server should accept. See
// https://github.com/etcd-io/etcd/blob/release-3.3/etcdserver/server.go#L90.
// https://github.com/etcd-io/etcd/blob/release-3.4/embed/config.go#L56.
// A request body might be encoded in json, and is converted to
// proto when persisted in etcd. Assuming the upper bound of
// the size ratio is 10:1, we set 100MB as the largest size
// proto when persisted in etcd, so we allow 2x as the largest size
// increase the "copy" operations in a json patch may cause.
JSONPatchMaxCopyBytes: int64(100 * 1024 * 1024),
// 10MB is the recommended maximum client request size in bytes
JSONPatchMaxCopyBytes: int64(3 * 1024 * 1024),
// 1.5MB is the recommended client request size in byte
// the etcd server should accept. See
// https://github.com/etcd-io/etcd/blob/release-3.3/etcdserver/server.go#L90.
// https://github.com/etcd-io/etcd/blob/release-3.4/embed/config.go#L56.
// A request body might be encoded in json, and is converted to
// proto when persisted in etcd. Assuming the upper bound of
// the size ratio is 10:1, we set 100MB as the largest request
// proto when persisted in etcd, so we allow 2x as the largest request
// body size to be accepted and decoded in a write request.
MaxRequestBodyBytes: int64(100 * 1024 * 1024),
MaxRequestBodyBytes: int64(3 * 1024 * 1024),
// Default to treating watch as a long-running operation
// Generic API servers have no inherent long-running subresources
@ -382,6 +380,17 @@ type CompletedConfig struct {
*completedConfig
}
// AddHealthChecks adds a health check to our config to be exposed by the health endpoints
// of our configured apiserver. We should prefer this to adding healthChecks directly to
// the config unless we explicitly want to add a healthcheck only to a specific health endpoint.
func (c *Config) AddHealthChecks(healthChecks ...healthz.HealthChecker) {
for _, check := range healthChecks {
c.HealthzChecks = append(c.HealthzChecks, check)
c.LivezChecks = append(c.LivezChecks, check)
c.ReadyzChecks = append(c.ReadyzChecks, check)
}
}
// Complete fills in any fields not set that are required to have valid data and can be derived
// from other fields. If you're going to `ApplyOptions`, do that first. It's mutating the receiver.
func (c *Config) Complete(informers informers.SharedInformerFactory) CompletedConfig {
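AddHealthChecks fans a single check out to /healthz, /livez and /readyz, which is why the etcd options hunk later in this diff switches to it. A minimal sketch of the wiring, assuming a generic apiserver Config is being built; the function and check names here are illustrative only:

```go
package main

import (
	"net/http"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	genericapiserver "k8s.io/apiserver/pkg/server"
	"k8s.io/apiserver/pkg/server/healthz"
)

func buildConfig() *genericapiserver.Config {
	codecs := serializer.NewCodecFactory(runtime.NewScheme())
	cfg := genericapiserver.NewConfig(codecs)

	// One call registers the check on the healthz, livez and readyz endpoints.
	cfg.AddHealthChecks(healthz.NamedCheck("example", func(r *http.Request) error {
		return nil // report healthy
	}))
	return cfg
}

func main() { _ = buildConfig() }
```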


@ -38,12 +38,9 @@ func (s *SecureServingInfo) NewClientConfig(caCert []byte) (*restclient.Config,
}
return &restclient.Config{
// Increase QPS limits. The client is currently passed to all admission plugins,
// and those can be throttled in case of higher load on apiserver - see #22340 and #22422
// for more details. Once #22422 is fixed, we may want to remove it.
QPS: 50,
Burst: 100,
Host: "https://" + net.JoinHostPort(host, port),
// Do not limit loopback client QPS.
QPS: -1,
Host: "https://" + net.JoinHostPort(host, port),
// override the ServerName to select our loopback certificate via SNI. This name is also
// used by the client to compare the returns server certificate against.
TLSClientConfig: restclient.TLSClientConfig{


@ -28,6 +28,7 @@ import (
"time"
apierrors "k8s.io/apimachinery/pkg/api/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apiserver/pkg/endpoints/metrics"
apirequest "k8s.io/apiserver/pkg/endpoints/request"
)
@ -119,6 +120,23 @@ func (t *timeoutHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
return
case <-after:
defer func() {
// resultCh needs to have a reader, since the function doing
// the work needs to send to it. This is defer'd to ensure it runs
// even if the post timeout work itself panics.
go func() {
res := <-resultCh
if res != nil {
switch t := res.(type) {
case error:
utilruntime.HandleError(t)
default:
utilruntime.HandleError(fmt.Errorf("%v", res))
}
}
}()
}()
postTimeoutFn()
tw.timeout(err)
}
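The deferred goroutine above guarantees resultCh always has a reader once the timeout path wins, so the handler goroutine's late send (or its recovered panic value) can never block forever. A stripped-down sketch of the same pattern, separate from the apiserver code and with illustrative names:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	resultCh := make(chan error)

	// Simulates the request handler that may outlive the timeout.
	go func() {
		time.Sleep(50 * time.Millisecond)
		resultCh <- fmt.Errorf("handler finished after the deadline")
	}()

	select {
	case err := <-resultCh:
		fmt.Println("completed in time:", err)
	case <-time.After(10 * time.Millisecond):
		// Timed out: keep a reader on resultCh in the background so the
		// late send above never blocks the handler goroutine forever.
		go func() {
			if err := <-resultCh; err != nil {
				fmt.Println("late result:", err)
			}
		}()
		fmt.Println("request timed out")
	}

	time.Sleep(100 * time.Millisecond) // demo only: give the drain goroutine time to log
}
```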


@ -202,7 +202,7 @@ func (s *EtcdOptions) addEtcdHealthEndpoint(c *server.Config) error {
if err != nil {
return err
}
c.HealthzChecks = append(c.HealthzChecks, healthz.NamedCheck("etcd", func(r *http.Request) error {
c.AddHealthChecks(healthz.NamedCheck("etcd", func(r *http.Request) error {
return healthCheck()
}))
@ -211,8 +211,7 @@ func (s *EtcdOptions) addEtcdHealthEndpoint(c *server.Config) error {
if err != nil {
return err
}
c.HealthzChecks = append(c.HealthzChecks, kmsPluginHealthzChecks...)
c.AddHealthChecks(kmsPluginHealthzChecks...)
}
return nil


@ -30,6 +30,7 @@ go_library(
importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/storage/value",
importpath = "k8s.io/apiserver/pkg/storage/value",
deps = [
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/component-base/metrics:go_default_library",
"//staging/src/k8s.io/component-base/metrics/legacyregistry:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",


@ -22,6 +22,8 @@ import (
"fmt"
"sync"
"time"
"k8s.io/apimachinery/pkg/util/errors"
)
func init() {
@ -129,6 +131,7 @@ func NewPrefixTransformers(err error, transformers ...PrefixTransformer) Transfo
// the first transformer.
func (t *prefixTransformers) TransformFromStorage(data []byte, context Context) ([]byte, bool, error) {
start := time.Now()
var errs []error
for i, transformer := range t.transformers {
if bytes.HasPrefix(data, transformer.Prefix) {
result, stale, err := transformer.Transformer.TransformFromStorage(data[len(transformer.Prefix):], context)
@ -144,9 +147,48 @@ func (t *prefixTransformers) TransformFromStorage(data []byte, context Context)
} else {
RecordTransformation("from_storage", string(transformer.Prefix), start, err)
}
// It is valid to have overlapping prefixes when the same encryption provider
// is specified multiple times but with different keys (the first provider is
// being rotated to and some later provider is being rotated away from).
//
// Example:
//
// {
// "aescbc": {
// "keys": [
// {
// "name": "2",
// "secret": "some key 2"
// }
// ]
// }
// },
// {
// "aescbc": {
// "keys": [
// {
// "name": "1",
// "secret": "some key 1"
// }
// ]
// }
// },
//
// The transformers for both aescbc configs share the prefix k8s:enc:aescbc:v1:
// but a failure in the first one should not prevent a later match from being attempted.
// Thus we never short-circuit on a prefix match that results in an error.
if err != nil {
errs = append(errs, err)
continue
}
return result, stale || i != 0, err
}
}
if err := errors.Reduce(errors.NewAggregate(errs)); err != nil {
return nil, false, err
}
RecordTransformation("from_storage", "unknown", start, t.err)
return nil, false, t.err
}


@ -3,8 +3,8 @@ package version
var (
gitMajor = "1"
gitMinor = "16"
gitVersion = "v1.16.0-k3s.1"
gitCommit = "6cded9539b673ea7c5467303ddb3ad5628f9bb8a"
gitVersion = "v1.16.2-k3s.1"
gitCommit = "b8b17ba55f20e590df507fce333dfee13ab438c6"
gitTreeState = "clean"
buildDate = "2019-09-27T21:33Z"
buildDate = "2019-10-16T05:17Z"
)


@ -209,7 +209,7 @@ func WaitForNamedCacheSync(controllerName string, stopCh <-chan struct{}, cacheS
// if the controller should shutdown
// callers should prefer WaitForNamedCacheSync()
func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool {
err := wait.PollUntil(syncedPollPeriod,
err := wait.PollImmediateUntil(syncedPollPeriod,
func() (bool, error) {
for _, syncFunc := range cacheSyncs {
if !syncFunc() {


@ -158,6 +158,8 @@ gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=


@ -3,8 +3,8 @@ package version
var (
gitMajor = "1"
gitMinor = "16"
gitVersion = "v1.16.0-k3s.1"
gitCommit = "6cded9539b673ea7c5467303ddb3ad5628f9bb8a"
gitVersion = "v1.16.2-k3s.1"
gitCommit = "b8b17ba55f20e590df507fce333dfee13ab438c6"
gitTreeState = "clean"
buildDate = "2019-09-27T21:33Z"
buildDate = "2019-10-16T05:17Z"
)


@ -139,6 +139,8 @@ gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=


@ -3,8 +3,8 @@ package version
var (
gitMajor = "1"
gitMinor = "16"
gitVersion = "v1.16.0-k3s.1"
gitCommit = "6cded9539b673ea7c5467303ddb3ad5628f9bb8a"
gitVersion = "v1.16.2-k3s.1"
gitCommit = "b8b17ba55f20e590df507fce333dfee13ab438c6"
gitTreeState = "clean"
buildDate = "2019-09-27T21:33Z"
buildDate = "2019-10-16T05:17Z"
)


@ -585,14 +585,6 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan
}
}
// If the kubelet config controller is available, and dynamic config is enabled, start the config and status sync loops
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) && len(s.DynamicConfigDir.Value()) > 0 &&
kubeDeps.KubeletConfigController != nil && !standaloneMode && !s.RunOnce {
if err := kubeDeps.KubeletConfigController.StartSync(kubeDeps.KubeClient, kubeDeps.EventClient, string(nodeName)); err != nil {
return err
}
}
if kubeDeps.Auth == nil {
auth, err := BuildAuth(nodeName, kubeDeps.KubeClient, s.KubeletConfiguration)
if err != nil {
@ -722,6 +714,14 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan
return err
}
// If the kubelet config controller is available, and dynamic config is enabled, start the config and status sync loops
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) && len(s.DynamicConfigDir.Value()) > 0 &&
kubeDeps.KubeletConfigController != nil && !standaloneMode && !s.RunOnce {
if err := kubeDeps.KubeletConfigController.StartSync(kubeDeps.KubeClient, kubeDeps.EventClient, string(nodeName)); err != nil {
return err
}
}
if s.HealthzPort > 0 {
mux := http.NewServeMux()
healthz.InstallHandler(mux)


@ -202,6 +202,11 @@ func (tc *TokenCleaner) evalSecret(o interface{}) {
klog.V(3).Infof("Error deleting Secret: %v", err)
}
} else if ttl > 0 {
tc.queue.AddAfter(o, ttl)
key, err := controller.KeyFunc(o)
if err != nil {
utilruntime.HandleError(err)
return
}
tc.queue.AddAfter(key, ttl)
}
}


@ -293,7 +293,7 @@ func GetNUMANodeInfo() (NUMANodeInfo, error) {
// nil NUMANodeInfo, indicating that no NUMA information is available
// on this machine. This should implicitly be interpreted as having a
// single NUMA node with id 0 for all CPUs.
nodelist, err := ioutil.ReadFile("/sys/devices/system/node/possible")
nodelist, err := ioutil.ReadFile("/sys/devices/system/node/online")
if err != nil {
return nil, nil
}


@ -192,7 +192,7 @@ func (m *manager) calculateAffinity(pod v1.Pod, container v1.Container) Topology
// If hints is nil, insert a single, preferred any-numa hint into allProviderHints.
if len(hints) == 0 {
klog.Infof("[topologymanager] Hint Provider has no preference for NUMA affinity with any resource")
allProviderHints = append(allProviderHints, []TopologyHint{{defaultAffinity, true}})
allProviderHints = append(allProviderHints, []TopologyHint{{nil, true}})
continue
}
@ -200,13 +200,13 @@ func (m *manager) calculateAffinity(pod v1.Pod, container v1.Container) Topology
for resource := range hints {
if hints[resource] == nil {
klog.Infof("[topologymanager] Hint Provider has no preference for NUMA affinity with resource '%s'", resource)
allProviderHints = append(allProviderHints, []TopologyHint{{defaultAffinity, true}})
allProviderHints = append(allProviderHints, []TopologyHint{{nil, true}})
continue
}
if len(hints[resource]) == 0 {
klog.Infof("[topologymanager] Hint Provider has no possible NUMA affinities for resource '%s'", resource)
allProviderHints = append(allProviderHints, []TopologyHint{{defaultAffinity, false}})
allProviderHints = append(allProviderHints, []TopologyHint{{nil, false}})
continue
}
@ -226,18 +226,21 @@ func (m *manager) calculateAffinity(pod v1.Pod, container v1.Container) Topology
preferred := true
var numaAffinities []socketmask.SocketMask
for _, hint := range permutation {
// Only consider hints that have an actual NUMANodeAffinity set.
if hint.NUMANodeAffinity != nil {
if !hint.Preferred {
preferred = false
}
// Special case PolicySingleNumaNode to only prefer hints where
// all providers have a single NUMA affinity set.
if m.policy != nil && m.policy.Name() == PolicySingleNumaNode && hint.NUMANodeAffinity.Count() > 1 {
preferred = false
}
if hint.NUMANodeAffinity == nil {
numaAffinities = append(numaAffinities, defaultAffinity)
} else {
numaAffinities = append(numaAffinities, hint.NUMANodeAffinity)
}
if !hint.Preferred {
preferred = false
}
// Special case PolicySingleNumaNode to only prefer hints where
// all providers have a single NUMA affinity set.
if m.policy != nil && m.policy.Name() == PolicySingleNumaNode && hint.NUMANodeAffinity != nil && hint.NUMANodeAffinity.Count() > 1 {
preferred = false
}
}
// Merge the affinities using a bitwise-and operation.


@ -42,6 +42,8 @@ type streamingRuntime struct {
var _ streaming.Runtime = &streamingRuntime{}
const maxMsgSize = 1024 * 1024 * 16
func (r *streamingRuntime) Exec(containerID string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
return r.exec(containerID, cmd, in, out, err, tty, resize, 0)
}
@ -78,8 +80,8 @@ func (ds *dockerService) ExecSync(_ context.Context, req *runtimeapi.ExecSyncReq
var stdoutBuffer, stderrBuffer bytes.Buffer
err := ds.streamingRuntime.exec(req.ContainerId, req.Cmd,
nil, // in
ioutils.WriteCloserWrapper(&stdoutBuffer),
ioutils.WriteCloserWrapper(&stderrBuffer),
ioutils.WriteCloserWrapper(ioutils.LimitWriter(&stdoutBuffer, maxMsgSize)),
ioutils.WriteCloserWrapper(ioutils.LimitWriter(&stderrBuffer, maxMsgSize)),
false, // tty
nil, // resize
timeout)


@ -28,7 +28,7 @@ import (
func (r *streamingRuntime) portForward(podSandboxID string, port int32, stream io.ReadWriteCloser) error {
stderr := new(bytes.Buffer)
err := r.exec(podSandboxID, []string{"wincat.exe", "localhost", fmt.Sprint(port)}, stream, stream, ioutils.WriteCloserWrapper(stderr), false, nil, 0)
err := r.exec(podSandboxID, []string{"wincat.exe", "127.0.0.1", fmt.Sprint(port)}, stream, stream, ioutils.WriteCloserWrapper(stderr), false, nil, 0)
if err != nil {
return fmt.Errorf("%v: %s", err, stderr.String())
}


@ -80,10 +80,7 @@ const (
"ipam": {
"type": "host-local",
"ranges": [%s],
"routes": [
{ "dst": "%s" },
{ "dst": "%s" }
]
"routes": [%s]
}
}`
)
@ -283,7 +280,7 @@ func (plugin *kubenetNetworkPlugin) Event(name string, details map[string]interf
//setup hairpinMode
setHairpin := plugin.hairpinMode == kubeletconfig.HairpinVeth
json := fmt.Sprintf(NET_CONFIG_TEMPLATE, BridgeName, plugin.mtu, network.DefaultInterfaceName, setHairpin, plugin.getRangesConfig(), zeroCIDRv4, zeroCIDRv6)
json := fmt.Sprintf(NET_CONFIG_TEMPLATE, BridgeName, plugin.mtu, network.DefaultInterfaceName, setHairpin, plugin.getRangesConfig(), plugin.getRoutesConfig())
klog.V(4).Infof("CNI network config set to %v", json)
plugin.netConfig, err = libcni.ConfFromBytes([]byte(json))
if err != nil {
@ -844,6 +841,29 @@ func (plugin *kubenetNetworkPlugin) getRangesConfig() string {
return strings.Join(ranges[:], ",")
}
// given the CIDRs assigned to the node,
// create bridge routes configuration that conforms to them
func (plugin *kubenetNetworkPlugin) getRoutesConfig() string {
var (
routes []string
hasV4, hasV6 bool
)
for _, thisCIDR := range plugin.podCIDRs {
if thisCIDR.IP.To4() != nil {
hasV4 = true
} else {
hasV6 = true
}
}
if hasV4 {
routes = append(routes, fmt.Sprintf(`{"dst": "%s"}`, zeroCIDRv4))
}
if hasV6 {
routes = append(routes, fmt.Sprintf(`{"dst": "%s"}`, zeroCIDRv6))
}
return strings.Join(routes, ",")
}
func (plugin *kubenetNetworkPlugin) addPodIP(id kubecontainer.ContainerID, ip string) {
plugin.mu.Lock()
defer plugin.mu.Unlock()


@ -58,6 +58,7 @@ go_test(
"//pkg/kubelet/prober/results:go_default_library",
"//pkg/kubelet/status:go_default_library",
"//pkg/kubelet/status/testing:go_default_library",
"//pkg/kubelet/util/ioutils:go_default_library",
"//pkg/probe:go_default_library",
"//pkg/probe/exec:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",


@ -252,63 +252,68 @@ func formatURL(scheme string, host string, port int, path string) *url.URL {
type execInContainer struct {
// run executes a command in a container. Combined stdout and stderr output is always returned. An
// error is returned if one occurred.
run func() ([]byte, error)
run func() ([]byte, error)
writer io.Writer
}
func (pb *prober) newExecInContainer(container v1.Container, containerID kubecontainer.ContainerID, cmd []string, timeout time.Duration) exec.Cmd {
return execInContainer{func() ([]byte, error) {
return &execInContainer{run: func() ([]byte, error) {
return pb.runner.RunInContainer(containerID, cmd, timeout)
}}
}
func (eic execInContainer) Run() error {
return fmt.Errorf("unimplemented")
func (eic *execInContainer) Run() error {
return nil
}
func (eic execInContainer) CombinedOutput() ([]byte, error) {
func (eic *execInContainer) CombinedOutput() ([]byte, error) {
return eic.run()
}
func (eic execInContainer) Output() ([]byte, error) {
func (eic *execInContainer) Output() ([]byte, error) {
return nil, fmt.Errorf("unimplemented")
}
func (eic execInContainer) SetDir(dir string) {
func (eic *execInContainer) SetDir(dir string) {
//unimplemented
}
func (eic execInContainer) SetStdin(in io.Reader) {
func (eic *execInContainer) SetStdin(in io.Reader) {
//unimplemented
}
func (eic execInContainer) SetStdout(out io.Writer) {
func (eic *execInContainer) SetStdout(out io.Writer) {
eic.writer = out
}
func (eic *execInContainer) SetStderr(out io.Writer) {
eic.writer = out
}
func (eic *execInContainer) SetEnv(env []string) {
//unimplemented
}
func (eic execInContainer) SetStderr(out io.Writer) {
func (eic *execInContainer) Stop() {
//unimplemented
}
func (eic execInContainer) SetEnv(env []string) {
//unimplemented
func (eic *execInContainer) Start() error {
data, err := eic.run()
if eic.writer != nil {
eic.writer.Write(data)
}
return err
}
func (eic execInContainer) Stop() {
//unimplemented
func (eic *execInContainer) Wait() error {
return nil
}
func (eic execInContainer) Start() error {
return fmt.Errorf("unimplemented")
}
func (eic execInContainer) Wait() error {
return fmt.Errorf("unimplemented")
}
func (eic execInContainer) StdoutPipe() (io.ReadCloser, error) {
func (eic *execInContainer) StdoutPipe() (io.ReadCloser, error) {
return nil, fmt.Errorf("unimplemented")
}
func (eic execInContainer) StderrPipe() (io.ReadCloser, error) {
func (eic *execInContainer) StderrPipe() (io.ReadCloser, error) {
return nil, fmt.Errorf("unimplemented")
}


@ -1,9 +1,6 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
@ -23,3 +20,10 @@ filegroup(
srcs = [":package-srcs"],
tags = ["automanaged"],
)
go_test(
name = "go_default_test",
srcs = ["ioutils_test.go"],
embed = [":go_default_library"],
deps = ["//vendor/github.com/stretchr/testify/assert:go_default_library"],
)


@ -35,3 +35,36 @@ func (w *writeCloserWrapper) Close() error {
func WriteCloserWrapper(w io.Writer) io.WriteCloser {
return &writeCloserWrapper{w}
}
// LimitWriter is a copy of the standard library ioutils.LimitReader,
// applied to the writer interface.
// LimitWriter returns a Writer that writes to w
// but stops with EOF after n bytes.
// The underlying implementation is a *LimitedWriter.
func LimitWriter(w io.Writer, n int64) io.Writer { return &LimitedWriter{w, n} }
// A LimitedWriter writes to W but limits the amount of
// data returned to just N bytes. Each call to Write
// updates N to reflect the new amount remaining.
// Write returns EOF when N <= 0 or when the underlying W returns EOF.
type LimitedWriter struct {
W io.Writer // underlying writer
N int64 // max bytes remaining
}
func (l *LimitedWriter) Write(p []byte) (n int, err error) {
if l.N <= 0 {
return 0, io.ErrShortWrite
}
truncated := false
if int64(len(p)) > l.N {
p = p[0:l.N]
truncated = true
}
n, err = l.W.Write(p)
l.N -= int64(n)
if err == nil && truncated {
err = io.ErrShortWrite
}
return
}
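The BUILD change earlier in this diff adds an ioutils_test.go; a sketch of what such a test can verify for LimitedWriter, based directly on the code above (the test name is illustrative and the real test added by this commit may differ):

```go
package ioutils

import (
	"bytes"
	"io"
	"testing"
)

// Writing past the limit truncates the output and reports io.ErrShortWrite.
func TestLimitWriterTruncates(t *testing.T) {
	var buf bytes.Buffer
	w := LimitWriter(&buf, 4)

	n, err := w.Write([]byte("hello world"))
	if n != 4 || err != io.ErrShortWrite {
		t.Fatalf("got n=%d err=%v, want n=4 err=io.ErrShortWrite", n, err)
	}
	if got := buf.String(); got != "hell" {
		t.Fatalf("got %q, want %q", got, "hell")
	}
}
```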


@ -11,6 +11,7 @@ go_library(
srcs = ["exec.go"],
importpath = "k8s.io/kubernetes/pkg/probe/exec",
deps = [
"//pkg/kubelet/util/ioutils:go_default_library",
"//pkg/probe:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",


@ -17,10 +17,17 @@ limitations under the License.
package exec
import (
"bytes"
"k8s.io/kubernetes/pkg/kubelet/util/ioutils"
"k8s.io/kubernetes/pkg/probe"
"k8s.io/utils/exec"
"k8s.io/klog"
"k8s.io/utils/exec"
)
const (
maxReadLength = 10 * 1 << 10 // 10KB
)
// New creates a Prober.
@ -39,7 +46,17 @@ type execProber struct{}
// from executing a command. Returns the Result status, command output, and
// errors if any.
func (pr execProber) Probe(e exec.Cmd) (probe.Result, string, error) {
data, err := e.CombinedOutput()
var dataBuffer bytes.Buffer
writer := ioutils.LimitWriter(&dataBuffer, maxReadLength)
e.SetStderr(writer)
e.SetStdout(writer)
err := e.Start()
if err == nil {
err = e.Wait()
}
data := dataBuffer.Bytes()
klog.V(4).Infof("Exec probe response: %q", string(data))
if err != nil {
exit, ok := err.(exec.ExitError)


@ -51,17 +51,16 @@ func NewInterPodAffinityPriority(
}
type podAffinityPriorityMap struct {
// nodes contain all nodes that should be considered
// nodes contain all nodes that should be considered.
nodes []*v1.Node
// counts store the mapping from node name to so-far computed score of
// the node.
counts map[string]*int64
// counts store the so-far computed score for each node.
counts []int64
}
func newPodAffinityPriorityMap(nodes []*v1.Node) *podAffinityPriorityMap {
return &podAffinityPriorityMap{
nodes: nodes,
counts: make(map[string]*int64, len(nodes)),
counts: make([]int64, len(nodes)),
}
}
@ -73,9 +72,9 @@ func (p *podAffinityPriorityMap) processTerm(term *v1.PodAffinityTerm, podDefini
}
match := priorityutil.PodMatchesTermsNamespaceAndSelector(podToCheck, namespaces, selector)
if match {
for _, node := range p.nodes {
for i, node := range p.nodes {
if priorityutil.NodesHaveSameTopologyKey(node, fixedNode, term.TopologyKey) {
atomic.AddInt64(p.counts[node.Name], weight)
atomic.AddInt64(&p.counts[i], weight)
}
}
}
@ -102,17 +101,11 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node
hasAffinityConstraints := affinity != nil && affinity.PodAffinity != nil
hasAntiAffinityConstraints := affinity != nil && affinity.PodAntiAffinity != nil
// priorityMap stores the mapping from node name to so-far computed score of
// the node.
// pm stores (1) all nodes that should be considered and (2) the so-far computed score for each node.
pm := newPodAffinityPriorityMap(nodes)
allNodeNames := make([]string, 0, len(nodeNameToInfo))
lazyInit := hasAffinityConstraints || hasAntiAffinityConstraints
for name := range nodeNameToInfo {
allNodeNames = append(allNodeNames, name)
// if pod has affinity defined, or target node has affinityPods
if lazyInit || len(nodeNameToInfo[name].PodsWithAffinity()) != 0 {
pm.counts[name] = new(int64)
}
}
// convert the topology key based weights to the node name based weights
@ -216,25 +209,22 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node
return nil, err
}
for _, node := range nodes {
if pm.counts[node.Name] == nil {
continue
for i := range nodes {
if pm.counts[i] > maxCount {
maxCount = pm.counts[i]
}
if *pm.counts[node.Name] > maxCount {
maxCount = *pm.counts[node.Name]
}
if *pm.counts[node.Name] < minCount {
minCount = *pm.counts[node.Name]
if pm.counts[i] < minCount {
minCount = pm.counts[i]
}
}
// calculate final priority score for each node
result := make(schedulerapi.HostPriorityList, 0, len(nodes))
maxMinDiff := maxCount - minCount
for _, node := range nodes {
for i, node := range nodes {
fScore := float64(0)
if maxMinDiff > 0 && pm.counts[node.Name] != nil {
fScore = float64(schedulerapi.MaxPriority) * (float64(*pm.counts[node.Name]-minCount) / float64(maxCount-minCount))
if maxMinDiff > 0 {
fScore = float64(schedulerapi.MaxPriority) * (float64(pm.counts[i]-minCount) / float64(maxCount-minCount))
}
result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int(fScore)})
if klog.V(10) {

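The scoring refactor above swaps the map[string]*int64 of lazily allocated per-node counters for a plain []int64 indexed by node position, so workers can bump scores with atomic.AddInt64 on the element's address and no initialization pass is needed. A small sketch of that pattern, with made-up node names and weights:

```go
// Concurrent workers accumulate per-node scores into a slice via atomic adds.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	nodes := []string{"node-a", "node-b", "node-c"}
	counts := make([]int64, len(nodes)) // one slot per node, no lazy map init

	var wg sync.WaitGroup
	for w := 0; w < 8; w++ { // 8 concurrent "processTerm" workers
		wg.Add(1)
		go func(weight int64) {
			defer wg.Done()
			for i := range nodes {
				if i%2 == 0 { // pretend only even-indexed nodes match the term
					atomic.AddInt64(&counts[i], weight)
				}
			}
		}(int64(w + 1))
	}
	wg.Wait()

	for i, name := range nodes {
		fmt.Printf("%s score=%d\n", name, counts[i])
	}
}
```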

@ -3,8 +3,8 @@ package version
var (
gitMajor = "1"
gitMinor = "16"
gitVersion = "v1.16.0-k3s.1"
gitCommit = "6cded9539b673ea7c5467303ddb3ad5628f9bb8a"
gitVersion = "v1.16.2-k3s.1"
gitCommit = "b8b17ba55f20e590df507fce333dfee13ab438c6"
gitTreeState = "clean"
buildDate = "2019-09-27T21:33Z"
buildDate = "2019-10-16T05:17Z"
)


@ -150,17 +150,23 @@ func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI stri
// DetachDisk detaches a disk from host. The vhd can be identified by diskName or diskURI.
func (c *controllerCommon) DetachDisk(diskName, diskURI string, nodeName types.NodeName) error {
instanceid, err := c.cloud.InstanceID(context.TODO(), nodeName)
if err != nil {
if err == cloudprovider.InstanceNotFound {
// if host doesn't exist, no need to detach
klog.Warningf("azureDisk - failed to get azure instance id(%q), DetachDisk(%s) will assume disk is already detached",
nodeName, diskURI)
return nil
}
klog.Warningf("failed to get azure instance id (%v)", err)
return fmt.Errorf("failed to get azure instance id for node %q (%v)", nodeName, err)
}
vmset, err := c.getNodeVMSet(nodeName)
if err != nil {
return err
}
instanceid, err := c.cloud.InstanceID(context.TODO(), nodeName)
if err != nil {
klog.Warningf("failed to get azure instance id (%v)", err)
return fmt.Errorf("failed to get azure instance id for node %q (%v)", nodeName, err)
}
klog.V(2).Infof("detach %v from node %q", diskURI, nodeName)
// make the lock here as small as possible

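The reordered DetachDisk above resolves the instance ID first and treats cloudprovider.InstanceNotFound as "the disk is already detached" rather than failing. Here is a sketch of that idempotent-detach shape; the errInstanceNotFound sentinel and the lookup/detach helpers are stand-ins, not the Azure provider's API.

```go
// Sketch of an idempotent detach: a missing host means nothing to detach.
package main

import (
	"errors"
	"fmt"
)

var errInstanceNotFound = errors.New("instance not found")

func lookupInstanceID(node string) (string, error) {
	if node == "deleted-node" {
		return "", errInstanceNotFound
	}
	return "vm-" + node, nil
}

func detachDisk(node, diskURI string) error {
	instanceID, err := lookupInstanceID(node)
	if err != nil {
		if errors.Is(err, errInstanceNotFound) {
			// Host is gone, so the disk cannot still be attached to it.
			fmt.Printf("node %q not found, assuming %s already detached\n", node, diskURI)
			return nil
		}
		return fmt.Errorf("failed to get instance id for node %q: %w", node, err)
	}
	fmt.Printf("detaching %s from %s\n", diskURI, instanceID)
	return nil
}

func main() {
	_ = detachDisk("worker-1", "disk-uri-1")     // normal path
	_ = detachDisk("deleted-node", "disk-uri-2") // treated as already detached
}
```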

@ -86,8 +86,9 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod
defer cancel()
// Invalidate the cache right after updating
key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID))
defer ss.vmssVMCache.Delete(key)
if err = ss.deleteCacheForNode(vmName); err != nil {
return err
}
klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk(%s, %s)", nodeResourceGroup, nodeName, diskName, diskURI)
_, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM, "attach_disk")
@ -157,8 +158,9 @@ func (ss *scaleSet) DetachDisk(diskName, diskURI string, nodeName types.NodeName
defer cancel()
// Invalidate the cache right after updating
key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID))
defer ss.vmssVMCache.Delete(key)
if err = ss.deleteCacheForNode(vmName); err != nil {
return nil, err
}
klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%s, %s)", nodeResourceGroup, nodeName, diskName, diskURI)
return ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM, "detach_disk")


@ -104,18 +104,38 @@ const (
clusterNameKey = "kubernetes-cluster-name"
)
// GetLoadBalancer returns whether the specified load balancer exists, and
// GetLoadBalancer returns whether the specified load balancer and its components exist, and
// if so, what its status is.
func (az *Cloud) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error) {
_, status, exists, err = az.getServiceLoadBalancer(service, clusterName, nil, false)
// Since public IP is not a part of the load balancer on Azure,
// there is a chance that we could orphan public IP resources while we delete the load balancer (kubernetes/kubernetes#80571).
// We need to make sure the existence of the load balancer depends on the load balancer resource and public IP resource on Azure.
existsPip := func() bool {
pipName, _, err := az.determinePublicIPName(clusterName, service)
if err != nil {
return false
}
pipResourceGroup := az.getPublicIPAddressResourceGroup(service)
_, existsPip, err := az.getPublicIPAddress(pipResourceGroup, pipName)
if err != nil {
return false
}
return existsPip
}()
_, status, existsLb, err := az.getServiceLoadBalancer(service, clusterName, nil, false)
if err != nil {
return nil, false, err
return nil, existsPip, err
}
if !exists {
// Return exists = false only if the load balancer and the public IP are not found on Azure
if !existsLb && !existsPip {
serviceName := getServiceName(service)
klog.V(5).Infof("getloadbalancer (cluster:%s) (service:%s) - doesn't exist", clusterName, serviceName)
return nil, false, nil
}
// Return exists = true if either the load balancer or the public IP (or both) exists
return status, true, nil
}
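The GetLoadBalancer change above reports the service as existing when either the load balancer or its public IP is still present, so cleanup keeps being retried until both are gone and the public IP is not orphaned. A sketch of the combined check, with stand-in lookup helpers:

```go
// Existence of the service's frontend is the OR of its two Azure resources.
package main

import "fmt"

func loadBalancerExists(service string) bool { return false } // the LB was already deleted
func publicIPExists(service string) bool     { return true }  // but its public IP is left behind

func getLoadBalancer(service string) bool {
	existsLb := loadBalancerExists(service)
	existsPip := publicIPExists(service)
	// Only report "gone" when both the LB and the public IP are gone.
	return existsLb || existsPip
}

func main() {
	fmt.Println("exists:", getLoadBalancer("my-service")) // exists: true
}
```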
@ -169,6 +189,10 @@ func (az *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, ser
// UpdateLoadBalancer updates hosts under the specified load balancer.
func (az *Cloud) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) error {
if !az.shouldUpdateLoadBalancer(clusterName, service) {
klog.V(2).Infof("UpdateLoadBalancer: skipping service %s because it is either being deleted or does not exist anymore", service.Name)
return nil
}
_, err := az.EnsureLoadBalancer(ctx, clusterName, service, nodes)
return err
}
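UpdateLoadBalancer now guards on shouldUpdateLoadBalancer, skipping services that are being deleted or whose load balancer no longer exists. A sketch of that guard, assuming an illustrative service struct and lbExists helper:

```go
// Skip reconciliation for services that are terminating or already torn down.
package main

import (
	"fmt"
	"time"
)

type service struct {
	name              string
	deletionTimestamp *time.Time
}

func lbExists(s service) bool { return s.name != "orphaned" }

func shouldUpdateLoadBalancer(s service) bool {
	return lbExists(s) && s.deletionTimestamp == nil
}

func main() {
	now := time.Now()
	for _, s := range []service{
		{name: "healthy"},
		{name: "terminating", deletionTimestamp: &now},
		{name: "orphaned"},
	} {
		if !shouldUpdateLoadBalancer(s) {
			fmt.Printf("UpdateLoadBalancer: skipping %s\n", s.name)
			continue
		}
		fmt.Printf("UpdateLoadBalancer: reconciling %s\n", s.name)
	}
}
```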
@ -475,7 +499,7 @@ func (az *Cloud) findServiceIPAddress(ctx context.Context, clusterName string, s
return service.Spec.LoadBalancerIP, nil
}
lbStatus, existsLb, err := az.GetLoadBalancer(ctx, clusterName, service)
_, lbStatus, existsLb, err := az.getServiceLoadBalancer(service, clusterName, nil, false)
if err != nil {
return "", err
}
@ -546,8 +570,12 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai
if ipv6 {
pip.PublicIPAddressVersion = network.IPv6
klog.V(2).Infof("service(%s): pip(%s) - creating as ipv6 for clusterIP:%v", serviceName, *pip.Name, service.Spec.ClusterIP)
// static allocation on IPv6 on Azure is not allowed
pip.PublicIPAddressPropertiesFormat.PublicIPAllocationMethod = network.Dynamic
if az.useStandardLoadBalancer() {
// standard sku must have static allocation method for ipv6
pip.PublicIPAddressPropertiesFormat.PublicIPAllocationMethod = network.Static
}
} else {
pip.PublicIPAddressVersion = network.IPv4
klog.V(2).Infof("service(%s): pip(%s) - creating as ipv4 for clusterIP:%v", serviceName, *pip.Name, service.Spec.ClusterIP)
@ -1279,6 +1307,11 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
return &sg, nil
}
func (az *Cloud) shouldUpdateLoadBalancer(clusterName string, service *v1.Service) bool {
_, _, existsLb, _ := az.getServiceLoadBalancer(service, clusterName, nil, false)
return existsLb && service.ObjectMeta.DeletionTimestamp == nil
}
func logSafe(s *string) string {
if s == nil {
return "(nil)"


@ -25,6 +25,7 @@ import (
"sort"
"strconv"
"strings"
"sync"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network"
@ -60,10 +61,8 @@ type scaleSet struct {
// (e.g. master nodes) may not belong to any scale sets.
availabilitySet VMSet
vmssCache *timedCache
vmssVMCache *timedCache
nodeNameToScaleSetMappingCache *timedCache
availabilitySetNodesCache *timedCache
vmssVMCache *timedCache
availabilitySetNodesCache *timedCache
}
// newScaleSet creates a new scaleSet.
@ -74,22 +73,12 @@ func newScaleSet(az *Cloud) (VMSet, error) {
availabilitySet: newAvailabilitySet(az),
}
ss.nodeNameToScaleSetMappingCache, err = ss.newNodeNameToScaleSetMappingCache()
if err != nil {
return nil, err
}
ss.availabilitySetNodesCache, err = ss.newAvailabilitySetNodesCache()
if err != nil {
return nil, err
}
ss.vmssCache, err = ss.newVmssCache()
if err != nil {
return nil, err
}
ss.vmssVMCache, err = ss.newVmssVMCache()
ss.vmssVMCache, err = ss.newVMSSVirtualMachinesCache()
if err != nil {
return nil, err
}
@ -99,39 +88,46 @@ func newScaleSet(az *Cloud) (VMSet, error) {
// getVmssVM gets virtualMachineScaleSetVM by nodeName from cache.
// It returns cloudprovider.InstanceNotFound if node does not belong to any scale sets.
func (ss *scaleSet) getVmssVM(nodeName string) (ssName, instanceID string, vm compute.VirtualMachineScaleSetVM, err error) {
instanceID, err = getScaleSetVMInstanceID(nodeName)
func (ss *scaleSet) getVmssVM(nodeName string) (string, string, *compute.VirtualMachineScaleSetVM, error) {
getter := func(nodeName string) (string, string, *compute.VirtualMachineScaleSetVM, error) {
cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey)
if err != nil {
return "", "", nil, err
}
virtualMachines := cached.(*sync.Map)
if vm, ok := virtualMachines.Load(nodeName); ok {
result := vm.(*vmssVirtualMachinesEntry)
return result.vmssName, result.instanceID, result.virtualMachine, nil
}
return "", "", nil, nil
}
_, err := getScaleSetVMInstanceID(nodeName)
if err != nil {
return ssName, instanceID, vm, err
return "", "", nil, err
}
ssName, err = ss.getScaleSetNameByNodeName(nodeName)
vmssName, instanceID, vm, err := getter(nodeName)
if err != nil {
return ssName, instanceID, vm, err
return "", "", nil, err
}
if vm != nil {
return vmssName, instanceID, vm, nil
}
if ssName == "" {
return "", "", vm, cloudprovider.InstanceNotFound
}
resourceGroup, err := ss.GetNodeResourceGroup(nodeName)
klog.V(3).Infof("Couldn't find VMSS VM with nodeName %s, refreshing the cache", nodeName)
ss.vmssVMCache.Delete(vmssVirtualMachinesKey)
vmssName, instanceID, vm, err = getter(nodeName)
if err != nil {
return "", "", vm, err
return "", "", nil, err
}
klog.V(4).Infof("getVmssVM gets scaleSetName (%q) and instanceID (%q) for node %q", ssName, instanceID, nodeName)
key := buildVmssCacheKey(resourceGroup, ss.makeVmssVMName(ssName, instanceID))
cachedVM, err := ss.vmssVMCache.Get(key)
if err != nil {
return ssName, instanceID, vm, err
if vm == nil {
return "", "", nil, cloudprovider.InstanceNotFound
}
if cachedVM == nil {
klog.Errorf("Can't find node (%q) in any scale sets", nodeName)
return ssName, instanceID, vm, cloudprovider.InstanceNotFound
}
return ssName, instanceID, *(cachedVM.(*compute.VirtualMachineScaleSetVM)), nil
return vmssName, instanceID, vm, nil
}
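The refactored getVmssVM above reads from a single sync.Map-backed cache keyed by node name and, on a miss, drops the cached map and retries once before returning cloudprovider.InstanceNotFound. Below is a sketch of that lookup-miss-refresh-retry pattern; the refresh function and vmCache type stand in for the timed cache and the VMSS list call.

```go
// Lookup with one forced refresh on miss, backed by a rebuildable sync.Map.
package main

import (
	"errors"
	"fmt"
	"sync"
)

var errInstanceNotFound = errors.New("instance not found")

type vmEntry struct {
	vmssName   string
	instanceID string
}

type vmCache struct {
	mu      sync.Mutex
	entries *sync.Map // nodeName -> *vmEntry
	refresh func() *sync.Map
}

func (c *vmCache) snapshot() *sync.Map {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.entries == nil {
		c.entries = c.refresh() // rebuild the whole map, like the cache getter
	}
	return c.entries
}

func (c *vmCache) invalidate() {
	c.mu.Lock()
	c.entries = nil
	c.mu.Unlock()
}

func (c *vmCache) getVM(node string) (*vmEntry, error) {
	get := func() *vmEntry {
		if v, ok := c.snapshot().Load(node); ok {
			return v.(*vmEntry)
		}
		return nil
	}
	if vm := get(); vm != nil {
		return vm, nil
	}
	// Cache miss: the node may have just been created, so refresh and retry.
	c.invalidate()
	if vm := get(); vm != nil {
		return vm, nil
	}
	return nil, errInstanceNotFound
}

func main() {
	calls := 0
	cache := &vmCache{refresh: func() *sync.Map {
		calls++
		m := &sync.Map{}
		if calls > 1 { // the node only shows up in the second listing
			m.Store("node-1", &vmEntry{vmssName: "vmss-a", instanceID: "3"})
		}
		return m
	}}
	vm, err := cache.getVM("node-1")
	fmt.Println(vm, err, "list calls:", calls)
}
```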
// GetPowerStatusByNodeName returns the power state of the specified node.
@ -158,20 +154,49 @@ func (ss *scaleSet) GetPowerStatusByNodeName(name string) (powerState string, er
// getCachedVirtualMachineByInstanceID gets scaleSetVMInfo from cache.
// The node must belong to one of scale sets.
func (ss *scaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID string) (vm compute.VirtualMachineScaleSetVM, err error) {
vmName := ss.makeVmssVMName(scaleSetName, instanceID)
key := buildVmssCacheKey(resourceGroup, vmName)
cachedVM, err := ss.vmssVMCache.Get(key)
func (ss *scaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID string) (*compute.VirtualMachineScaleSetVM, error) {
getter := func() (vm *compute.VirtualMachineScaleSetVM, found bool, err error) {
cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey)
if err != nil {
return nil, false, err
}
virtualMachines := cached.(*sync.Map)
virtualMachines.Range(func(key, value interface{}) bool {
vmEntry := value.(*vmssVirtualMachinesEntry)
if strings.EqualFold(vmEntry.resourceGroup, resourceGroup) &&
strings.EqualFold(vmEntry.vmssName, scaleSetName) &&
strings.EqualFold(vmEntry.instanceID, instanceID) {
vm = vmEntry.virtualMachine
found = true
return false
}
return true
})
return vm, found, nil
}
vm, found, err := getter()
if err != nil {
return vm, err
return nil, err
}
if found {
return vm, nil
}
if cachedVM == nil {
klog.Errorf("couldn't find vmss virtual machine by scaleSetName (%s) and instanceID (%s)", scaleSetName, instanceID)
return vm, cloudprovider.InstanceNotFound
klog.V(3).Infof("Couldn't find VMSS VM with scaleSetName %q and instanceID %q, refreshing the cache", scaleSetName, instanceID)
ss.vmssVMCache.Delete(vmssVirtualMachinesKey)
vm, found, err = getter()
if err != nil {
return nil, err
}
if !found {
return nil, cloudprovider.InstanceNotFound
}
return *(cachedVM.(*compute.VirtualMachineScaleSetVM)), nil
return vm, nil
}
// GetInstanceIDByNodeName gets the cloud provider ID by node name.
@ -463,9 +488,15 @@ func (ss *scaleSet) listScaleSets(resourceGroup string) ([]string, error) {
return nil, err
}
ssNames := make([]string, len(allScaleSets))
for i := range allScaleSets {
ssNames[i] = *(allScaleSets[i].Name)
ssNames := make([]string, 0)
for _, vmss := range allScaleSets {
name := *vmss.Name
if vmss.Sku != nil && to.Int64(vmss.Sku.Capacity) == 0 {
klog.V(3).Infof("Capacity of VMSS %q is 0, skipping", name)
continue
}
ssNames = append(ssNames, name)
}
return ssNames, nil
@ -500,7 +531,7 @@ func (ss *scaleSet) getAgentPoolScaleSets(nodes []*v1.Node) (*[]string, error) {
}
nodeName := nodes[nx].Name
ssName, err := ss.getScaleSetNameByNodeName(nodeName)
ssName, _, _, err := ss.getVmssVM(nodeName)
if err != nil {
return nil, err
}
@ -599,7 +630,7 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, err
return network.Interface{}, err
}
primaryInterfaceID, err := ss.getPrimaryInterfaceID(vm)
primaryInterfaceID, err := ss.getPrimaryInterfaceID(*vm)
if err != nil {
klog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getPrimaryInterfaceID(), err=%v", nodeName, err)
return network.Interface{}, err
@ -816,8 +847,9 @@ func (ss *scaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeNam
}
// Invalidate the cache since we would update it.
key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID))
defer ss.vmssVMCache.Delete(key)
if err = ss.deleteCacheForNode(vmName); err != nil {
return err
}
// Update vmssVM with backoff.
ctx, cancel := getContextWithCancel()
@ -1094,8 +1126,9 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromNode(service *v1.Service, nodeNa
}
// Invalidate the cache since we would update it.
key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID))
defer ss.vmssVMCache.Delete(key)
if err = ss.deleteCacheForNode(nodeName); err != nil {
return err
}
// Update vmssVM with backoff.
ctx, cancel := getContextWithCancel()


@ -21,8 +21,12 @@ package azure
import (
"fmt"
"strings"
"sync"
"time"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
"github.com/Azure/go-autorest/autorest/to"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog"
)
@ -31,18 +35,19 @@ var (
vmssNameSeparator = "_"
vmssCacheSeparator = "#"
nodeNameToScaleSetMappingKey = "k8sNodeNameToScaleSetMappingKey"
availabilitySetNodesKey = "k8sAvailabilitySetNodesKey"
vmssVirtualMachinesKey = "k8svmssVirtualMachinesKey"
availabilitySetNodesKey = "k8sAvailabilitySetNodesKey"
vmssCacheTTL = time.Minute
vmssVMCacheTTL = time.Minute
availabilitySetNodesCacheTTL = 5 * time.Minute
nodeNameToScaleSetMappingCacheTTL = 5 * time.Minute
availabilitySetNodesCacheTTL = 15 * time.Minute
vmssVirtualMachinesTTL = 10 * time.Minute
)
// nodeNameToScaleSetMapping maps nodeName to scaleSet name.
// The map is required because vmss nodeName is not equal to its vmName.
type nodeNameToScaleSetMapping map[string]string
type vmssVirtualMachinesEntry struct {
resourceGroup string
vmssName string
instanceID string
virtualMachine *compute.VirtualMachineScaleSetVM
}
func (ss *scaleSet) makeVmssVMName(scaleSetName, instanceID string) string {
return fmt.Sprintf("%s%s%s", scaleSetName, vmssNameSeparator, instanceID)
@ -62,32 +67,9 @@ func extractVmssVMName(name string) (string, string, error) {
return ssName, instanceID, nil
}
// vmssCache only holds vmss from ss.ResourceGroup because nodes from other resourceGroups
// will be excluded from LB backends.
func (ss *scaleSet) newVmssCache() (*timedCache, error) {
func (ss *scaleSet) newVMSSVirtualMachinesCache() (*timedCache, error) {
getter := func(key string) (interface{}, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
result, err := ss.VirtualMachineScaleSetsClient.Get(ctx, ss.ResourceGroup, key)
exists, message, realErr := checkResourceExistsFromError(err)
if realErr != nil {
return nil, realErr
}
if !exists {
klog.V(2).Infof("Virtual machine scale set %q not found with message: %q", key, message)
return nil, nil
}
return &result, nil
}
return newTimedcache(vmssCacheTTL, getter)
}
func (ss *scaleSet) newNodeNameToScaleSetMappingCache() (*timedCache, error) {
getter := func(key string) (interface{}, error) {
localCache := make(nodeNameToScaleSetMapping)
localCache := &sync.Map{} // [nodeName]*vmssVirtualMachinesEntry
allResourceGroups, err := ss.GetResourceGroups()
if err != nil {
@ -106,14 +88,20 @@ func (ss *scaleSet) newNodeNameToScaleSetMappingCache() (*timedCache, error) {
return nil, err
}
for _, vm := range vms {
for i := range vms {
vm := vms[i]
if vm.OsProfile == nil || vm.OsProfile.ComputerName == nil {
klog.Warningf("failed to get computerName for vmssVM (%q)", ssName)
continue
}
computerName := strings.ToLower(*vm.OsProfile.ComputerName)
localCache[computerName] = ssName
localCache.Store(computerName, &vmssVirtualMachinesEntry{
resourceGroup: resourceGroup,
vmssName: ssName,
instanceID: to.String(vm.InstanceID),
virtualMachine: &vm,
})
}
}
}
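The cache builder above switches from ranging over vms by value to copying vms[i] into a fresh variable before storing its address. With the Go loop semantics in effect at the time of this change (before Go 1.22), taking &vm of the range variable would have left every cached entry pointing at the same reused variable. A small demonstration of the difference:

```go
// Loop-variable pitfall: &rangeVar stores one shared pointer, a per-iteration
// copy stores distinct values.
package main

import "fmt"

type vm struct{ name string }

func main() {
	vms := []vm{{"vm_0"}, {"vm_1"}, {"vm_2"}}

	broken := map[int]*vm{}
	for i, v := range vms { // v is a single variable reused every iteration
		broken[i] = &v
	}

	fixed := map[int]*vm{}
	for i := range vms {
		v := vms[i] // fresh copy per iteration, safe to take the address of
		fixed[i] = &v
	}

	fmt.Println(broken[0].name, broken[1].name, broken[2].name) // vm_2 vm_2 vm_2 (Go < 1.22)
	fmt.Println(fixed[0].name, fixed[1].name, fixed[2].name)    // vm_0 vm_1 vm_2
}
```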
@ -121,7 +109,18 @@ func (ss *scaleSet) newNodeNameToScaleSetMappingCache() (*timedCache, error) {
return localCache, nil
}
return newTimedcache(nodeNameToScaleSetMappingCacheTTL, getter)
return newTimedcache(vmssVirtualMachinesTTL, getter)
}
func (ss *scaleSet) deleteCacheForNode(nodeName string) error {
cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey)
if err != nil {
return err
}
virtualMachines := cached.(*sync.Map)
virtualMachines.Delete(nodeName)
return nil
}
func (ss *scaleSet) newAvailabilitySetNodesCache() (*timedCache, error) {
@ -151,109 +150,6 @@ func (ss *scaleSet) newAvailabilitySetNodesCache() (*timedCache, error) {
return newTimedcache(availabilitySetNodesCacheTTL, getter)
}
func buildVmssCacheKey(resourceGroup, name string) string {
// key is composed of <resourceGroup>#<vmName>
return fmt.Sprintf("%s%s%s", strings.ToLower(resourceGroup), vmssCacheSeparator, name)
}
func extractVmssCacheKey(key string) (string, string, error) {
// key is composed of <resourceGroup>#<vmName>
keyItems := strings.Split(key, vmssCacheSeparator)
if len(keyItems) != 2 {
return "", "", fmt.Errorf("key %q is not in format '<resourceGroup>#<vmName>'", key)
}
resourceGroup := keyItems[0]
vmName := keyItems[1]
return resourceGroup, vmName, nil
}
func (ss *scaleSet) newVmssVMCache() (*timedCache, error) {
getter := func(key string) (interface{}, error) {
// key is composed of <resourceGroup>#<vmName>
resourceGroup, vmName, err := extractVmssCacheKey(key)
if err != nil {
return nil, err
}
// vmName's format is 'scaleSetName_instanceID'
ssName, instanceID, err := extractVmssVMName(vmName)
if err != nil {
return nil, err
}
// Not found, the VM doesn't belong to any known scale sets.
if ssName == "" {
return nil, nil
}
ctx, cancel := getContextWithCancel()
defer cancel()
result, err := ss.VirtualMachineScaleSetVMsClient.Get(ctx, resourceGroup, ssName, instanceID)
exists, message, realErr := checkResourceExistsFromError(err)
if realErr != nil {
return nil, realErr
}
if !exists {
klog.V(2).Infof("Virtual machine scale set VM %q not found with message: %q", key, message)
return nil, nil
}
// Get instanceView for vmssVM.
if result.InstanceView == nil {
viewCtx, viewCancel := getContextWithCancel()
defer viewCancel()
view, err := ss.VirtualMachineScaleSetVMsClient.GetInstanceView(viewCtx, resourceGroup, ssName, instanceID)
// It is possible that the vmssVM gets removed just before this call. So check whether the VM exist again.
exists, message, realErr = checkResourceExistsFromError(err)
if realErr != nil {
return nil, realErr
}
if !exists {
klog.V(2).Infof("Virtual machine scale set VM %q not found with message: %q", key, message)
return nil, nil
}
result.InstanceView = &view
}
return &result, nil
}
return newTimedcache(vmssVMCacheTTL, getter)
}
func (ss *scaleSet) getScaleSetNameByNodeName(nodeName string) (string, error) {
getScaleSetName := func(nodeName string) (string, error) {
nodeNameMapping, err := ss.nodeNameToScaleSetMappingCache.Get(nodeNameToScaleSetMappingKey)
if err != nil {
return "", err
}
realMapping := nodeNameMapping.(nodeNameToScaleSetMapping)
if ssName, ok := realMapping[nodeName]; ok {
return ssName, nil
}
return "", nil
}
ssName, err := getScaleSetName(nodeName)
if err != nil {
return "", err
}
if ssName != "" {
return ssName, nil
}
// ssName is still not found, it is likely that new Nodes are created.
// Force refresh the cache and try again.
ss.nodeNameToScaleSetMappingCache.Delete(nodeNameToScaleSetMappingKey)
return getScaleSetName(nodeName)
}
func (ss *scaleSet) isNodeManagedByAvailabilitySet(nodeName string) (bool, error) {
cached, err := ss.availabilitySetNodesCache.Get(availabilitySetNodesKey)
if err != nil {


@ -1132,7 +1132,7 @@ func doSafeMakeDir(pathname string, base string, perm os.FileMode) error {
return fmt.Errorf("cannot create directory %s: %s", currentPath, err)
}
// Dive into the created directory
childFD, err := syscall.Openat(parentFD, dir, nofollowFlags, 0)
childFD, err = syscall.Openat(parentFD, dir, nofollowFlags, 0)
if err != nil {
return fmt.Errorf("cannot open %s: %s", currentPath, err)
}

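The one-character doSafeMakeDir fix above replaces := with = so the childFD opened inside the loop body is assigned to the outer variable instead of a freshly declared, shadowed one. A small demonstration of the shadowing pitfall, with a stand-in openAt helper instead of syscall.Openat:

```go
// := inside a block declares a new childFD; the outer one never changes.
package main

import "fmt"

func openAt() (int, error) { return 42, nil } // stand-in for syscall.Openat

func main() {
	childFD := -1

	if true { // some inner block, as in the directory-walking loop
		childFD, err := openAt() // BUG: declares a new childFD, shadows the outer one
		_ = childFD
		_ = err
	}
	fmt.Println("with := :", childFD) // still -1

	if true {
		var err error
		childFD, err = openAt() // assigns to the outer childFD
		_ = err
	}
	fmt.Println("with  = :", childFD) // 42
}
```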
vendor/modules.txt

@ -1084,9 +1084,9 @@ gopkg.in/square/go-jose.v2/cipher
gopkg.in/square/go-jose.v2/json
# gopkg.in/warnings.v0 v0.1.1
gopkg.in/warnings.v0
# gopkg.in/yaml.v2 v2.2.2
# gopkg.in/yaml.v2 v2.2.4
gopkg.in/yaml.v2
# k8s.io/api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.16.0-k3s.1
# k8s.io/api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.16.2-k3s.1
k8s.io/api/core/v1
k8s.io/api/apps/v1
k8s.io/api/admissionregistration/v1
@ -1128,7 +1128,7 @@ k8s.io/api/storage/v1beta1
k8s.io/api/admission/v1
k8s.io/api/admission/v1beta1
k8s.io/api/imagepolicy/v1alpha1
# k8s.io/apiextensions-apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.16.0-k3s.1
# k8s.io/apiextensions-apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.16.2-k3s.1
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1
k8s.io/apiextensions-apiserver/pkg/apiserver
@ -1176,7 +1176,7 @@ k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensio
k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1
k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1
k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1
# k8s.io/apimachinery v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.16.0-k3s.1
# k8s.io/apimachinery v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.16.2-k3s.1
k8s.io/apimachinery/pkg/util/json
k8s.io/apimachinery/pkg/util/net
k8s.io/apimachinery/pkg/apis/meta/v1
@ -1238,7 +1238,7 @@ k8s.io/apimachinery/pkg/api/meta/table
k8s.io/apimachinery/pkg/apis/meta/v1beta1/validation
k8s.io/apimachinery/pkg/runtime/serializer/yaml
k8s.io/apimachinery/pkg/util/duration
# k8s.io/apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.16.0-k3s.1
# k8s.io/apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.16.2-k3s.1
k8s.io/apiserver/pkg/authentication/authenticator
k8s.io/apiserver/pkg/endpoints/request
k8s.io/apiserver/pkg/server
@ -1351,7 +1351,7 @@ k8s.io/apiserver/pkg/registry/generic/rest
k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1
k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission
k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1
# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.16.0-k3s.1
# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.16.2-k3s.1
k8s.io/cli-runtime/pkg/genericclioptions
k8s.io/cli-runtime/pkg/printers
k8s.io/cli-runtime/pkg/resource
@ -1364,7 +1364,7 @@ k8s.io/cli-runtime/pkg/kustomize/k8sdeps/configmapandsecret
k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/hash
k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch
k8s.io/cli-runtime/pkg/kustomize/k8sdeps/kv
# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.16.0-k3s.1
# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.16.2-k3s.1
k8s.io/client-go/tools/clientcmd
k8s.io/client-go/util/cert
k8s.io/client-go/kubernetes
@ -1550,20 +1550,20 @@ k8s.io/client-go/listers/node/v1alpha1
k8s.io/client-go/listers/scheduling/v1alpha1
k8s.io/client-go/listers/scheduling/v1beta1
k8s.io/client-go/listers/storage/v1alpha1
# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.16.0-k3s.1
# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.16.2-k3s.1
k8s.io/cloud-provider
k8s.io/cloud-provider/volume/helpers
k8s.io/cloud-provider/volume
k8s.io/cloud-provider/service/helpers
k8s.io/cloud-provider/volume/errors
k8s.io/cloud-provider/node/helpers
# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.16.0-k3s.1
# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.16.2-k3s.1
k8s.io/cluster-bootstrap/token/api
k8s.io/cluster-bootstrap/util/secrets
k8s.io/cluster-bootstrap/util/tokens
k8s.io/cluster-bootstrap/token/jws
k8s.io/cluster-bootstrap/token/util
# k8s.io/code-generator v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.16.0-k3s.1
# k8s.io/code-generator v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.16.2-k3s.1
k8s.io/code-generator/cmd/client-gen/args
k8s.io/code-generator/cmd/client-gen/generators
k8s.io/code-generator/cmd/client-gen/types
@ -1578,7 +1578,7 @@ k8s.io/code-generator/pkg/util
k8s.io/code-generator/cmd/client-gen/generators/fake
k8s.io/code-generator/cmd/client-gen/generators/scheme
k8s.io/code-generator/pkg/namer
# k8s.io/component-base v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.16.0-k3s.1
# k8s.io/component-base v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.16.2-k3s.1
k8s.io/component-base/logs
k8s.io/component-base/cli/flag
k8s.io/component-base/metrics/prometheus/restclient
@ -1591,10 +1591,10 @@ k8s.io/component-base/metrics
k8s.io/component-base/featuregate
k8s.io/component-base/config/v1alpha1
k8s.io/component-base/config/validation
# k8s.io/cri-api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.16.0-k3s.1
# k8s.io/cri-api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.16.2-k3s.1
k8s.io/cri-api/pkg/apis/runtime/v1alpha2
k8s.io/cri-api/pkg/apis
# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.16.0-k3s.1
# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.16.2-k3s.1
k8s.io/csi-translation-lib/plugins
k8s.io/csi-translation-lib
# k8s.io/gengo v0.0.0-20190822140433-26a664648505
@ -1609,7 +1609,7 @@ k8s.io/gengo/examples/set-gen/sets
k8s.io/heapster/metrics/api/v1/types
# k8s.io/klog v0.4.0
k8s.io/klog
# k8s.io/kube-aggregator v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.16.0-k3s.1
# k8s.io/kube-aggregator v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.16.2-k3s.1
k8s.io/kube-aggregator/pkg/apis/apiregistration/v1
k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/helper
k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1
@ -1637,7 +1637,7 @@ k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration/v1b
k8s.io/kube-aggregator/pkg/registry/apiservice
k8s.io/kube-aggregator/pkg/client/listers/apiregistration/v1beta1
k8s.io/kube-aggregator/pkg/apis/apiregistration/validation
# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.16.0-k3s.1
# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.16.2-k3s.1
k8s.io/kube-controller-manager/config/v1alpha1
# k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf
k8s.io/kube-openapi/pkg/builder
@ -1648,11 +1648,11 @@ k8s.io/kube-openapi/pkg/util/proto
k8s.io/kube-openapi/pkg/aggregator
k8s.io/kube-openapi/pkg/util/proto/validation
k8s.io/kube-openapi/pkg/schemaconv
# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.16.0-k3s.1
# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.16.2-k3s.1
k8s.io/kube-proxy/config/v1alpha1
# k8s.io/kube-scheduler v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.16.0-k3s.1
# k8s.io/kube-scheduler v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.16.2-k3s.1
k8s.io/kube-scheduler/config/v1alpha1
# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.16.0-k3s.1
# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.16.2-k3s.1
k8s.io/kubectl/pkg/util/term
k8s.io/kubectl/pkg/cmd
k8s.io/kubectl/pkg/cmd/annotate
@ -1729,9 +1729,9 @@ k8s.io/kubectl/pkg/util/fieldpath
k8s.io/kubectl/pkg/util/qos
k8s.io/kubectl/pkg/util/resource
k8s.io/kubectl/pkg/util/storage
# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.16.0-k3s.1
# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.16.2-k3s.1
k8s.io/kubelet/config/v1beta1
# k8s.io/kubernetes v1.16.0 => github.com/rancher/kubernetes v1.16.0-k3s.1
# k8s.io/kubernetes v1.16.0 => github.com/rancher/kubernetes v1.16.2-k3s.1
k8s.io/kubernetes/cmd/hyperkube
k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1
k8s.io/kubernetes/pkg/kubelet/util
@ -2447,7 +2447,7 @@ k8s.io/kubernetes/pkg/controller/volume/persistentvolume/config/v1alpha1
k8s.io/kubernetes/pkg/apis/abac/v1beta1
k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear
k8s.io/kubernetes/pkg/util/maps
# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.16.0-k3s.1
# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.16.2-k3s.1
k8s.io/legacy-cloud-providers/aws
k8s.io/legacy-cloud-providers/azure
k8s.io/legacy-cloud-providers/gce
@ -2457,7 +2457,7 @@ k8s.io/legacy-cloud-providers/vsphere/vclib
k8s.io/legacy-cloud-providers/azure/auth
k8s.io/legacy-cloud-providers/openstack/util/mount
k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers
# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.16.0-k3s.1
# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.16.2-k3s.1
k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1
k8s.io/metrics/pkg/client/custom_metrics
k8s.io/metrics/pkg/client/external_metrics