Merge pull request #1079 from erikwilson/bump-k8s-1.16.3-k3s.1

Bump k8s to v1.16.3-k3s.1
Erik Wilson, 2019-11-14 16:33:04 -07:00 (committed by GitHub)
commit fe4b9cafd6
48 changed files with 470 additions and 334 deletions

go.mod

@@ -32,31 +32,31 @@ replace (
 	github.com/prometheus/client_model => github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910
 	github.com/prometheus/common => github.com/prometheus/common v0.0.0-20181126121408-4724e9255275
 	github.com/prometheus/procfs => github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a
-	k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.16.2-k3s.1
+	k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.16.3-k3s.1
-	k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.16.2-k3s.1
+	k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.16.3-k3s.1
-	k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.16.2-k3s.1
+	k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.16.3-k3s.1
-	k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.16.2-k3s.1
+	k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.16.3-k3s.1
-	k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.16.2-k3s.1
+	k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.16.3-k3s.1
-	k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.16.2-k3s.1
+	k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.16.3-k3s.1
-	k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.16.2-k3s.1
+	k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.16.3-k3s.1
-	k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.16.2-k3s.1
+	k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.16.3-k3s.1
-	k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.16.2-k3s.1
+	k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.16.3-k3s.1
-	k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.16.2-k3s.1
+	k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.16.3-k3s.1
-	k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.16.2-k3s.1
+	k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.16.3-k3s.1
-	k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.16.2-k3s.1
+	k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.16.3-k3s.1
-	k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.16.2-k3s.1
+	k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.16.3-k3s.1
-	k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.16.2-k3s.1
+	k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.16.3-k3s.1
-	k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.16.2-k3s.1
+	k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.16.3-k3s.1
-	k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.16.2-k3s.1
+	k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.16.3-k3s.1
-	k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.16.2-k3s.1
+	k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.16.3-k3s.1
-	k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.16.2-k3s.1
+	k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.16.3-k3s.1
-	k8s.io/kubernetes => github.com/rancher/kubernetes v1.16.2-k3s.1
+	k8s.io/kubernetes => github.com/rancher/kubernetes v1.16.3-k3s.1
-	k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.16.2-k3s.1
+	k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.16.3-k3s.1
-	k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.16.2-k3s.1
+	k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.16.3-k3s.1
-	k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.16.2-k3s.1
+	k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.16.3-k3s.1
-	k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.16.2-k3s.1
+	k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.16.3-k3s.1
-	k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.16.2-k3s.1
+	k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.16.3-k3s.1
-	k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.16.2-k3s.1
+	k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.16.3-k3s.1
 	mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34
 )
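Every rancher/kubernetes replace directive moves in lockstep from v1.16.2-k3s.1 to v1.16.3-k3s.1. As a side note, a quick way to confirm which replaced version actually got linked into a resulting binary is the standard library's build-info API; a minimal, self-contained sketch (nothing here is specific to k3s):

package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	info, ok := debug.ReadBuildInfo()
	if !ok {
		fmt.Println("no module build info (binary not built with Go modules)")
		return
	}
	for _, dep := range info.Deps {
		// Replace is non-nil when a go.mod replace directive rewrote the dependency.
		if dep.Replace != nil {
			fmt.Printf("%s %s => %s %s\n", dep.Path, dep.Version, dep.Replace.Path, dep.Replace.Version)
		}
	}
}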

go.sum

@@ -117,14 +117,12 @@ github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ
 github.com/containernetworking/plugins v0.8.2 h1:5lnwfsAYO+V7yXhysJKy3E1A2Gy9oVut031zfdOzI9w=
 github.com/containernetworking/plugins v0.8.2/go.mod h1:TxALKWZpWL79BC3GOYKJzzXr7U8R23PdhwaLp6F3adc=
 github.com/coredns/corefile-migration v1.0.2/go.mod h1:OFwBp/Wc9dJt5cAZzHWMNhK1r5L0p0jDwIBc6j8NC8E=
-github.com/coreos/bbolt v1.3.1-coreos.6 h1:uTXKg9gY70s9jMAKdfljFQcuh4e/BXOM+V+d00KFj3A=
-github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/bbolt v1.3.3 h1:n6AiVyVRKQFNb6mJlwESEvvLoDyiTzXX7ORAUlkeBdY=
 github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/etcd v3.3.15+incompatible h1:+9RjdC18gMxNQVvSiXvObLu29mOFmkgdsB4cRTlV+EE=
+github.com/coreos/etcd v3.3.17+incompatible h1:f/Z3EoDSx1yjaIjLQGo1diYUlQYSBrrAQ5vP8NjwXwo=
-github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/etcd v3.3.17+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
 github.com/coreos/go-iptables v0.4.2 h1:KH0EwId05JwWIfb96gWvkiT2cbuOu8ygqUaB+yPAwIg=
 github.com/coreos/go-iptables v0.4.2/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
@@ -594,49 +592,49 @@ github.com/rancher/helm-controller v0.2.2 h1:MUqisy53/Ay1EYOF2uTCYBbGpgtZLNKKrI0
 github.com/rancher/helm-controller v0.2.2/go.mod h1:0JkL0UjxddNbT4FmLoESarD4Mz8xzA5YlejqJ/U4g+8=
 github.com/rancher/kine v0.2.4 h1:Vtv8twV2u3Gh2cOL7vXv69besGc3YahxYeFzICV6GB8=
 github.com/rancher/kine v0.2.4/go.mod h1:SdBUuE7e3XyrJvdBxCl9TMMapF+wyZnMZSP/H59OqNE=
-github.com/rancher/kubernetes v1.16.2-k3s.1 h1:+oJEecXgQDkEOD/X8z2YUdYVonbXZtGzXsmtKDPYesg=
+github.com/rancher/kubernetes v1.16.3-k3s.1 h1:gLZXEFTR1aX98fUzVPcWRZh55vZS/b4x3Iw5crVCVq4=
-github.com/rancher/kubernetes v1.16.2-k3s.1/go.mod h1:SmhGgKfQ30imqjFVj8AI+iW+zSyFsswNErKYeTfgoH0=
+github.com/rancher/kubernetes v1.16.3-k3s.1/go.mod h1:hJd0X6w7E/MiE7PcDp11XHhdgQBYc33vP+WtTJqG/AU=
-github.com/rancher/kubernetes/staging/src/k8s.io/api v1.16.2-k3s.1 h1:2kK5KD6MU86txBYKG+tM6j5zbey02DaIDtwpG5JsfnI=
+github.com/rancher/kubernetes/staging/src/k8s.io/api v1.16.3-k3s.1 h1:8O9g5yAmBOx+icT3CfbmpMfxXxk3SDXY82tdxkHKXuc=
-github.com/rancher/kubernetes/staging/src/k8s.io/api v1.16.2-k3s.1/go.mod h1:cHpnPcbNeE90PrTRnTu13OM+FN+ROt82odVbEh++81o=
+github.com/rancher/kubernetes/staging/src/k8s.io/api v1.16.3-k3s.1/go.mod h1:cHpnPcbNeE90PrTRnTu13OM+FN+ROt82odVbEh++81o=
-github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.16.2-k3s.1 h1:vZGFZHN6AmoIRdzj57gIB3oya7pb17wWDHD/ZKB+k68=
+github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.16.3-k3s.1 h1:Tr3lN4J2zj41c2NePPDxlviBQikBHblgs1Gv/w38ckI=
-github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.16.2-k3s.1/go.mod h1:CfhfFElTQ5DKDOaHZ6/P2QeJM9Dkg9udFx7Vp3wE3eo=
+github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.16.3-k3s.1/go.mod h1:tXCXeG1YI6F9buUotzmBdjCRCJfM0xfoNX5782iBQPA=
-github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.16.2-k3s.1 h1:Iaa5tbsSuMZqvXYwosTs+2fyqkFTwDNOyXmkoe6J1aA=
+github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.16.3-k3s.1 h1:nVM8nBNAciTYpSRFepLegrnZo1iko16yQ1yuEqt1OUo=
-github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.16.2-k3s.1/go.mod h1:I9gveEHn8RBUsRZ1zR7UbePt/ySCzzxsG1dWwpKNN5E=
+github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.16.3-k3s.1/go.mod h1:I9gveEHn8RBUsRZ1zR7UbePt/ySCzzxsG1dWwpKNN5E=
-github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.16.2-k3s.1 h1:47GCm5H0FV2uWM9w8/x7MAMbty/DgXmvvslrrUB0FZs=
+github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.16.3-k3s.1 h1:lLtOGFgTOiyGFF3l5jYRIGeQ9L604kxs0P+JLw2p22Q=
-github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.16.2-k3s.1/go.mod h1:tUn8Kq7rjvsqTXw4Ku6HT6lyaUAtz46fVolVnz+DZlw=
+github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.16.3-k3s.1/go.mod h1:+c9OY4l2MPZsWcuJPzAJfdMb6wpt6srdvgaOEvuSfcA=
-github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.16.2-k3s.1 h1:CmCcCfMqu//cm8cTTYwMPV6MDWpWLLDpDdWrGUghvHw=
+github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.16.3-k3s.1 h1:Sm/Xq877rSnRvjimBxhv801U5W+2G0UrmeBeeIyiBXQ=
-github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.16.2-k3s.1/go.mod h1:nBogvbgjMgo7AeVA6CuqVO13LVIfmlQ11t6xzAJdBN8=
+github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.16.3-k3s.1/go.mod h1:nBogvbgjMgo7AeVA6CuqVO13LVIfmlQ11t6xzAJdBN8=
-github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.16.2-k3s.1 h1:zwn4rTNrX7RTlLLE2+shx3X+6LHnub8Zdpoh44Q+FhY=
+github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.16.3-k3s.1 h1:ff2aXnfcVyZTUIcQiSUsJOs4idKPzs5vZMeanjgvUAE=
-github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.16.2-k3s.1/go.mod h1:GiGfbsjtP4tOW6zgpL8/vCUoyXAV5+9X2onLursPi08=
+github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.16.3-k3s.1/go.mod h1:GiGfbsjtP4tOW6zgpL8/vCUoyXAV5+9X2onLursPi08=
-github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.16.2-k3s.1 h1:qbNYhEltz9DRXcSOeMjDvvegiLSG8jWdUhkU2D/8KSk=
+github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.16.3-k3s.1 h1:0PwJiF8rwYOudmwsXqg2KFcY9W4VQfIasj7Vz/ULzVo=
-github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.16.2-k3s.1/go.mod h1:77Vtl0d5SOrs6vqwqhZZQakDEovGSm2rRqtpTeteqcQ=
+github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.16.3-k3s.1/go.mod h1:77Vtl0d5SOrs6vqwqhZZQakDEovGSm2rRqtpTeteqcQ=
-github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.16.2-k3s.1 h1:efPol+sRfueqsVUj/cUXYaAHqM0WGbLdoqBxxTvWrQY=
+github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.16.3-k3s.1 h1:VdXe+eDhRV55QARfGsT0hfxNNWM0N1jKsOK0EHzmq2k=
-github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.16.2-k3s.1/go.mod h1:Gwev4EWWC1Yfr0gBTJR0n8FYLsIdRu4ARubU6hXRadU=
+github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.16.3-k3s.1/go.mod h1:Gwev4EWWC1Yfr0gBTJR0n8FYLsIdRu4ARubU6hXRadU=
-github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.16.2-k3s.1 h1:MfD1MXvfc81W2KEbwobebkgCZNqR4ExD9lajYDorA/A=
+github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.16.3-k3s.1 h1:DaFwN+mi0AxL7yGBVr9Qz7S0Zagvr1z7u383qSq2g9M=
-github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.16.2-k3s.1/go.mod h1:uzCZz0cC/uXDgpjpMZ7lFzglGU/9tXyTiPDcX92d6OI=
+github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.16.3-k3s.1/go.mod h1:uzCZz0cC/uXDgpjpMZ7lFzglGU/9tXyTiPDcX92d6OI=
-github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.16.2-k3s.1 h1:eA5RmPXBgUTbc82Gch3T1tcuJFP6T4iE7aUdusFXVOY=
+github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.16.3-k3s.1 h1:3u4S45TVAifDKvfsXP2jn4I8THpKLpglLUKZhqK4N5Y=
-github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.16.2-k3s.1/go.mod h1:spPP+vRNS8EsnNNIhFCZTTuRO3XhV1WoF18HJySoZn8=
+github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.16.3-k3s.1/go.mod h1:spPP+vRNS8EsnNNIhFCZTTuRO3XhV1WoF18HJySoZn8=
-github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.16.2-k3s.1 h1:rs9F4np5cLbEBew/9jMH5/j651FhSs7KuRZD2eOIKR0=
+github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.16.3-k3s.1 h1:NjcKZ9cQShDlSGJeRjiaUYPwJ0Z3tYIcwwg0KpMY/c8=
-github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.16.2-k3s.1/go.mod h1:FuWtYjatYStosiEepg0w/7/QrG0T/HMh/FA5T/8AIP8=
+github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.16.3-k3s.1/go.mod h1:FuWtYjatYStosiEepg0w/7/QrG0T/HMh/FA5T/8AIP8=
-github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.16.2-k3s.1 h1:q9cfT/qFOHKIjF9mPauwhmGUfRV8n+U9735Cf5q0bMA=
+github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.16.3-k3s.1 h1:GrFM/f47LsPGC05UKto0xUTFQpIvmLTnOkHC0uwkdWk=
-github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.16.2-k3s.1/go.mod h1:w51XnEBJkmGEjUGylUXL1TezQIc0JYndQCsVkQMHjKA=
+github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.16.3-k3s.1/go.mod h1:w51XnEBJkmGEjUGylUXL1TezQIc0JYndQCsVkQMHjKA=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.16.2-k3s.1 h1:1uID+qLmE02FmTa9Ory7zKJJSyW23BCQEGTB/24WCoE=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.16.3-k3s.1 h1:d3hViQNdWxYdr1bHBSmgT+XXvCpEAFe4TnBDDqFLkX4=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.16.2-k3s.1/go.mod h1:ttKFRQ6/4l0mjLwPJ/Ccn9k/vc/6y5dJ98r88NLLiGw=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.16.3-k3s.1/go.mod h1:ttKFRQ6/4l0mjLwPJ/Ccn9k/vc/6y5dJ98r88NLLiGw=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.16.2-k3s.1 h1:xFBNpH30wgcJ9lAlXKNQyJmz8YxiMxn8UCbXSEQ3gxQ=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.16.3-k3s.1 h1:UlqZvsVdASNf4paguk+7h/BFISxOU57HpSBRYZrizkc=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.16.2-k3s.1/go.mod h1:Wm4X9LSXr3uszFEajh8M75iyxHdjOKSp0LCL4TIp7UQ=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.16.3-k3s.1/go.mod h1:Wm4X9LSXr3uszFEajh8M75iyxHdjOKSp0LCL4TIp7UQ=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.16.2-k3s.1 h1:Xq9l0M8ZK3jikoiVqLGRdLVA4P8QftfQ/lD8395Fuhc=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.16.3-k3s.1 h1:DtcwZshq8wsp4ruz8M2MxFQPExdnJB0EylnzFM5OA5E=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.16.2-k3s.1/go.mod h1:8btekvQmHgyy4XTchusVAW/mQIPE+hVLn61sZ/epsAA=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.16.3-k3s.1/go.mod h1:8btekvQmHgyy4XTchusVAW/mQIPE+hVLn61sZ/epsAA=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.16.2-k3s.1 h1:G+WwhP95psm9fSQ+OHcXEVVLlUSjVoHgD+6nLoEWNuY=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.16.3-k3s.1 h1:idwXj0qxcbVInN4jOICQjVbkOldhmmLiFw6/hnF7/Ls=
-github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.16.2-k3s.1/go.mod h1:sBq5nR6KVpfnkBsj4RjOQhw0j5yOtLHXIX2Dz5uZQmw=
+github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.16.3-k3s.1/go.mod h1:sBq5nR6KVpfnkBsj4RjOQhw0j5yOtLHXIX2Dz5uZQmw=
-github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.16.2-k3s.1 h1:OgATrfXxWVmBjmlZtV9oq+G9XddY3051GyI7lhaNKws=
+github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.16.3-k3s.1 h1:wP5U/iMCdNvPGIK969Ab6mIyI3ZAWsvAGoFWCifFukc=
-github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.16.2-k3s.1/go.mod h1:b4UoBV6BbZRU3F2VDqLsXqWFeNUhT2EtirOINqUzgOs=
+github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.16.3-k3s.1/go.mod h1:b4UoBV6BbZRU3F2VDqLsXqWFeNUhT2EtirOINqUzgOs=
-github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.16.2-k3s.1 h1:4hK23wpRnc38rDaapHOk2d6DNOF4GhDdbISUVXSRQR8=
+github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.16.3-k3s.1 h1:cvEWUijQ7c8NYXIO59cDWYpvcQxxJr65SXfmCiT7zaM=
-github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.16.2-k3s.1/go.mod h1:4Sbo2Vn3tAIZpwx4YIp+SushTtzzzabVrg9Tq4rrImM=
+github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.16.3-k3s.1/go.mod h1:4Sbo2Vn3tAIZpwx4YIp+SushTtzzzabVrg9Tq4rrImM=
-github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.16.2-k3s.1 h1:8tbJkA/XARMaf3/3Kas4K7MF4uQOUkRmz4aiEgfnBrM=
+github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.16.3-k3s.1 h1:Qmi68MupDDAISmWxgiHVJjaHqIKO9Qm+EzqlAQShOuU=
-github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.16.2-k3s.1/go.mod h1:OpqDei2/Qdg+5YGQYPiEuQ4vlFoiAJy0Ysn8aLKP7Cs=
+github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.16.3-k3s.1/go.mod h1:OpqDei2/Qdg+5YGQYPiEuQ4vlFoiAJy0Ysn8aLKP7Cs=
-github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.16.2-k3s.1 h1:Br9g854hWBNf1JDWesBZ11dNQxnYtGaWGSLOc6ITXds=
+github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.16.3-k3s.1 h1:DDok1lGxUI7EURrr3l0SOJ9A/9002gZtiZ4RlCfCdd4=
-github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.16.2-k3s.1/go.mod h1:vQHTmz0IaEb7/OXPSor1uga8Er0V+2M5aSdXG832NbU=
+github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.16.3-k3s.1/go.mod h1:vQHTmz0IaEb7/OXPSor1uga8Er0V+2M5aSdXG832NbU=
-github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.16.2-k3s.1/go.mod h1:E3i4wscD52Qj6PEcgUjvCd81Tl6Mghk1GHtEzoaaqwU=
+github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.16.3-k3s.1/go.mod h1:E3i4wscD52Qj6PEcgUjvCd81Tl6Mghk1GHtEzoaaqwU=
 github.com/rancher/moq v0.0.0-20190404221404-ee5226d43009 h1:Xsxh7fX3+2wAUJtPy8g2lZh0cYuyifqhBL0vxCIYojs=
 github.com/rancher/moq v0.0.0-20190404221404-ee5226d43009/go.mod h1:wpITyDPTi/Na5h73XkbuEf2AP9fbgrIGqqxVzFhYD6U=
 github.com/rancher/remotedialer v0.2.0 h1:xD7t3K6JYwTdAsxmGtTHQMkEkFgKouQ1foLxVW424Dc=


@@ -16,7 +16,9 @@
 package endpoint

 import (
+	"context"
 	"fmt"
+	"net"
 	"net/url"
 	"strings"
 	"sync"
@@ -228,13 +230,18 @@ func ParseTarget(target string) (string, string, error) {
 	return parts[0], parts[1], nil
 }

-// ParseHostPort splits a "<host>:<port>" string into the host and port parts.
-// The port part is optional.
-func ParseHostPort(hostPort string) (host string, port string) {
-	parts := strings.SplitN(hostPort, ":", 2)
-	host = parts[0]
-	if len(parts) > 1 {
-		port = parts[1]
-	}
-	return host, port
+// Dialer dials a endpoint using net.Dialer.
+// Context cancelation and timeout are supported.
+func Dialer(ctx context.Context, dialEp string) (net.Conn, error) {
+	proto, host, _ := ParseEndpoint(dialEp)
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	default:
+	}
+	dialer := &net.Dialer{}
+	if deadline, ok := ctx.Deadline(); ok {
+		dialer.Deadline = deadline
+	}
+	return dialer.DialContext(ctx, proto, host)
 }
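The new Dialer centralizes the cancelation-aware dial logic that client.go previously inlined: bail out if the context is already done, then propagate any context deadline to net.Dialer. A standalone sketch of the same pattern (the target address is only an illustration):

package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

// dial honors an already-canceled context, then hands any context
// deadline to net.Dialer, mirroring the Dialer added above.
func dial(ctx context.Context, proto, host string) (net.Conn, error) {
	select {
	case <-ctx.Done():
		return nil, ctx.Err() // caller already gave up
	default:
	}
	dialer := &net.Dialer{}
	if deadline, ok := ctx.Deadline(); ok {
		dialer.Deadline = deadline
	}
	return dialer.DialContext(ctx, proto, host)
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	conn, err := dial(ctx, "tcp", "127.0.0.1:2379")
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()
	fmt.Println("connected to", conn.RemoteAddr())
}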


@@ -25,13 +25,13 @@ import (
 	"sync"
 	"time"

-	"github.com/google/uuid"
 	"github.com/coreos/etcd/clientv3/balancer"
 	"github.com/coreos/etcd/clientv3/balancer/picker"
 	"github.com/coreos/etcd/clientv3/balancer/resolver/endpoint"
 	"github.com/coreos/etcd/clientv3/credentials"
 	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
 	"github.com/coreos/etcd/pkg/logutil"
+	"github.com/google/uuid"
 	"go.uber.org/zap"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
@@ -226,24 +226,17 @@
 		}
 	}
 	opts = append(opts, dopts...)

-	// Provide a net dialer that supports cancelation and timeout.
-	f := func(dialEp string, t time.Duration) (net.Conn, error) {
-		proto, host, _ := endpoint.ParseEndpoint(dialEp)
-		select {
-		case <-c.ctx.Done():
-			return nil, c.ctx.Err()
-		default:
-		}
-		dialer := &net.Dialer{Timeout: t}
-		return dialer.DialContext(c.ctx, proto, host)
-	}
-	opts = append(opts, grpc.WithDialer(f))
+	dialer := endpoint.Dialer
 	if creds != nil {
 		opts = append(opts, grpc.WithTransportCredentials(creds))
+		// gRPC load balancer workaround. See credentials.transportCredential for details.
+		if credsDialer, ok := creds.(TransportCredentialsWithDialer); ok {
+			dialer = credsDialer.Dialer
+		}
 	} else {
 		opts = append(opts, grpc.WithInsecure())
 	}
+	opts = append(opts, grpc.WithContextDialer(dialer))

 	// Interceptor retry and backoff.
 	// TODO: Replace all of clientv3/retry.go with interceptor based retry, or with
@@ -262,7 +255,10 @@
 // Dial connects to a single endpoint using the client's config.
 func (c *Client) Dial(ep string) (*grpc.ClientConn, error) {
-	creds := c.directDialCreds(ep)
+	creds, err := c.directDialCreds(ep)
+	if err != nil {
+		return nil, err
+	}
 	// Use the grpc passthrough resolver to directly dial a single endpoint.
 	// This resolver passes through the 'unix' and 'unixs' endpoints schemes used
 	// by etcd without modification, allowing us to directly dial endpoints and
@@ -365,8 +361,8 @@ func (c *Client) dial(target string, creds grpccredentials.TransportCredentials,
 	return conn, nil
 }

-func (c *Client) directDialCreds(ep string) grpccredentials.TransportCredentials {
-	_, hostPort, scheme := endpoint.ParseEndpoint(ep)
+func (c *Client) directDialCreds(ep string) (grpccredentials.TransportCredentials, error) {
+	_, host, scheme := endpoint.ParseEndpoint(ep)
 	creds := c.creds
 	if len(scheme) != 0 {
 		creds = c.processCreds(scheme)
@@ -375,12 +371,17 @@ func (c *Client) directDialCreds(ep string) grpccredentials.TransportCredentials
 			// Set the server name must to the endpoint hostname without port since grpc
 			// otherwise attempts to check if x509 cert is valid for the full endpoint
 			// including the scheme and port, which fails.
-			host, _ := endpoint.ParseHostPort(hostPort)
-			clone.OverrideServerName(host)
+			overrideServerName, _, err := net.SplitHostPort(host)
+			if err != nil {
+				// Either the host didn't have a port or the host could not be parsed. Either way, continue with the
+				// original host string.
+				overrideServerName = host
+			}
+			clone.OverrideServerName(overrideServerName)
 			creds = clone
 		}
 	}
-	return creds
+	return creds, nil
 }

 func (c *Client) dialWithBalancerCreds(ep string) grpccredentials.TransportCredentials {
@@ -659,3 +660,9 @@ func IsConnCanceled(err error) bool {
 	// <= gRPC v1.7.x returns 'errors.New("grpc: the client connection is closing")'
 	return strings.Contains(err.Error(), "grpc: the client connection is closing")
 }
+
+// TransportCredentialsWithDialer is for a gRPC load balancer workaround. See credentials.transportCredential for details.
+type TransportCredentialsWithDialer interface {
+	grpccredentials.TransportCredentials
+	Dialer(ctx context.Context, dialEp string) (net.Conn, error)
+}
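The key move above is threading a single context dialer through gRPC, swapping in the credentials' own dialer when the credentials provide one. A compressed sketch of that wiring, with a local interface name standing in for the vendored TransportCredentialsWithDialer:

package etcdclient

import (
	"context"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

// dialerCapable stands in for TransportCredentialsWithDialer above.
type dialerCapable interface {
	credentials.TransportCredentials
	Dialer(ctx context.Context, dialEp string) (net.Conn, error)
}

func dialOptions(creds credentials.TransportCredentials) []grpc.DialOption {
	// default context-aware dialer
	dialer := func(ctx context.Context, addr string) (net.Conn, error) {
		return (&net.Dialer{}).DialContext(ctx, "tcp", addr)
	}
	var opts []grpc.DialOption
	if creds != nil {
		opts = append(opts, grpc.WithTransportCredentials(creds))
		if dc, ok := creds.(dialerCapable); ok {
			dialer = dc.Dialer // let the credentials observe every dialed address
		}
	} else {
		opts = append(opts, grpc.WithInsecure())
	}
	return append(opts, grpc.WithContextDialer(dialer))
}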


@@ -22,6 +22,7 @@ import (
 	"net"
 	"sync"

+	"github.com/coreos/etcd/clientv3/balancer/resolver/endpoint"
 	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
 	grpccredentials "google.golang.org/grpc/credentials"
 )
@@ -65,38 +66,37 @@
 	}
 }

 // transportCredential implements "grpccredentials.TransportCredentials" interface.
+// transportCredential wraps TransportCredentials to track which
+// addresses are dialed for which endpoints, and then sets the authority when checking the endpoint's cert to the
+// hostname or IP of the dialed endpoint.
+// This is a workaround of a gRPC load balancer issue. gRPC uses the dialed target's service name as the authority when
+// checking all endpoint certs, which does not work for etcd servers using their hostname or IP as the Subject Alternative Name
+// in their TLS certs.
+// To enable, include both WithTransportCredentials(creds) and WithContextDialer(creds.Dialer)
+// when dialing.
 type transportCredential struct {
 	gtc grpccredentials.TransportCredentials
+	mu  sync.Mutex
+	// addrToEndpoint maps from the connection addresses that are dialed to the hostname or IP of the
+	// endpoint provided to the dialer when dialing
+	addrToEndpoint map[string]string
 }

 func newTransportCredential(cfg *tls.Config) *transportCredential {
 	return &transportCredential{
 		gtc: grpccredentials.NewTLS(cfg),
+		addrToEndpoint: map[string]string{},
 	}
 }

 func (tc *transportCredential) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, grpccredentials.AuthInfo, error) {
-	// Only overwrite when authority is an IP address!
-	// Let's say, a server runs SRV records on "etcd.local" that resolves
-	// to "m1.etcd.local", and its SAN field also includes "m1.etcd.local".
-	// But what if SAN does not include its resolved IP address (e.g. 127.0.0.1)?
-	// Then, the server should only authenticate using its DNS hostname "m1.etcd.local",
-	// instead of overwriting it with its IP address.
-	// And we do not overwrite "localhost" either. Only overwrite IP addresses!
-	if isIP(authority) {
-		target := rawConn.RemoteAddr().String()
-		if authority != target {
-			// When user dials with "grpc.WithDialer", "grpc.DialContext" "cc.parsedTarget"
-			// update only happens once. This is problematic, because when TLS is enabled,
-			// retries happen through "grpc.WithDialer" with static "cc.parsedTarget" from
-			// the initial dial call.
-			// If the server authenticates by IP addresses, we want to set a new endpoint as
-			// a new authority. Otherwise
-			// "transport: authentication handshake failed: x509: certificate is valid for 127.0.0.1, 192.168.121.180, not 192.168.223.156"
-			// when the new dial target is "192.168.121.180" whose certificate host name is also "192.168.121.180"
-			// but client tries to authenticate with previously set "cc.parsedTarget" field "192.168.223.156"
-			authority = target
-		}
-	}
+	// Set the authority when checking the endpoint's cert to the hostname or IP of the dialed endpoint
+	tc.mu.Lock()
+	dialEp, ok := tc.addrToEndpoint[rawConn.RemoteAddr().String()]
+	tc.mu.Unlock()
+	if ok {
+		_, host, _ := endpoint.ParseEndpoint(dialEp)
+		authority = host
+	}
 	return tc.gtc.ClientHandshake(ctx, authority, rawConn)
 }
@@ -115,8 +115,15 @@
 }

 func (tc *transportCredential) Clone() grpccredentials.TransportCredentials {
+	copy := map[string]string{}
+	tc.mu.Lock()
+	for k, v := range tc.addrToEndpoint {
+		copy[k] = v
+	}
+	tc.mu.Unlock()
 	return &transportCredential{
 		gtc: tc.gtc.Clone(),
+		addrToEndpoint: copy,
 	}
 }
@@ -124,6 +131,17 @@ func (tc *transportCredential) OverrideServerName(serverNameOverride string) err
 	return tc.gtc.OverrideServerName(serverNameOverride)
 }

+func (tc *transportCredential) Dialer(ctx context.Context, dialEp string) (net.Conn, error) {
+	// Keep track of which addresses are dialed for which endpoints
+	conn, err := endpoint.Dialer(ctx, dialEp)
+	if conn != nil {
+		tc.mu.Lock()
+		tc.addrToEndpoint[conn.RemoteAddr().String()] = dialEp
+		tc.mu.Unlock()
+	}
+	return conn, err
+}
+
 // perRPCCredential implements "grpccredentials.PerRPCCredentials" interface.
 type perRPCCredential struct {
 	authToken string
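The heart of the workaround is a mutex-guarded map written at dial time and read at handshake time. The pattern in isolation, with illustrative names and an illustrative endpoint:

package main

import (
	"context"
	"fmt"
	"net"
	"sync"
)

// dialTracker remembers which endpoint each remote address was dialed for,
// so a later TLS handshake can recover the endpoint's hostname.
type dialTracker struct {
	mu             sync.Mutex
	addrToEndpoint map[string]string
}

func (t *dialTracker) dial(ctx context.Context, endpoint, proto, host string) (net.Conn, error) {
	conn, err := (&net.Dialer{}).DialContext(ctx, proto, host)
	if conn != nil {
		t.mu.Lock()
		t.addrToEndpoint[conn.RemoteAddr().String()] = endpoint
		t.mu.Unlock()
	}
	return conn, err
}

func (t *dialTracker) endpointFor(conn net.Conn) (string, bool) {
	t.mu.Lock()
	defer t.mu.Unlock()
	ep, ok := t.addrToEndpoint[conn.RemoteAddr().String()]
	return ep, ok
}

func main() {
	t := &dialTracker{addrToEndpoint: map[string]string{}}
	conn, err := t.dial(context.Background(), "https://etcd-1.example.com:2379", "tcp", "127.0.0.1:2379")
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()
	ep, _ := t.endpointFor(conn)
	fmt.Println("dialed for endpoint:", ep)
}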


@@ -53,6 +53,9 @@ func wrapTLS(addr, scheme string, tlsinfo *TLSInfo, l net.Listener) (net.Listene
 	if scheme != "https" && scheme != "unixs" {
 		return l, nil
 	}
+	if tlsinfo != nil && tlsinfo.SkipClientSANVerify {
+		return NewTLSListener(l, tlsinfo)
+	}
 	return newTLSListener(l, tlsinfo, checkSAN)
 }
@@ -65,6 +68,8 @@ type TLSInfo struct {
 	CRLFile            string
 	InsecureSkipVerify bool

+	SkipClientSANVerify bool
+
 	// ServerName ensures the cert matches the given host in case of discovery / virtual hosting
 	ServerName string
@@ -95,7 +100,7 @@ func (info TLSInfo) Empty() bool {
 	return info.CertFile == "" && info.KeyFile == ""
 }

-func SelfCert(dirpath string, hosts []string) (info TLSInfo, err error) {
+func SelfCert(dirpath string, hosts []string, additionalUsages ...x509.ExtKeyUsage) (info TLSInfo, err error) {
 	if err = os.MkdirAll(dirpath, 0700); err != nil {
 		return
 	}
@@ -124,7 +129,7 @@ func SelfCert(dirpath string, hosts []string) (info TLSInfo, err error) {
 		NotAfter:  time.Now().Add(365 * (24 * time.Hour)),
 		KeyUsage:  x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
-		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
+		ExtKeyUsage: append([]x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, additionalUsages...),
 		BasicConstraintsValid: true,
 	}
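Putting the two additions together: SelfCert's new variadic parameter lets callers mint a self-signed cert that is also valid for client auth, and SkipClientSANVerify relaxes SAN checking on the resulting listener. A hedged usage sketch; the import path assumes the upstream location of this package, and the output directory is illustrative:

package main

import (
	"crypto/x509"
	"fmt"

	"github.com/coreos/etcd/pkg/transport"
)

func main() {
	// request client-auth usage in addition to the default server-auth
	info, err := transport.SelfCert("/tmp/certs", []string{"localhost"},
		x509.ExtKeyUsageClientAuth)
	if err != nil {
		fmt.Println("SelfCert failed:", err)
		return
	}
	// accept client certs whose SANs don't match the dialing host
	info.SkipClientSANVerify = true
	fmt.Println("cert:", info.CertFile, "key:", info.KeyFile)
}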


@@ -45,7 +45,7 @@ type IntOrString struct {
 }

 // Type represents the stored type of IntOrString.
-type Type int
+type Type int64

 const (
 	Int Type = iota // The IntOrString holds an int.
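The Type discriminator is widened from int to int64; usage is unchanged, since Type still only records which branch of the union is set. A JSON round-trip sketch:

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	var v intstr.IntOrString

	_ = json.Unmarshal([]byte(`"80%"`), &v) // string form
	fmt.Println(v.Type == intstr.String, v.StrVal)

	_ = json.Unmarshal([]byte(`8080`), &v) // integer form
	fmt.Println(v.Type == intstr.Int, v.IntVal)
}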


@@ -341,7 +341,7 @@ func (p *jsonPatcher) applyJSPatch(versionedJS []byte) (patchedJS []byte, retErr
 	// TODO(liggitt): drop this once golang json parser limits stack depth (https://github.com/golang/go/issues/31789)
 	if len(p.patchBytes) > 1024*1024 {
 		v := []interface{}{}
-		if err := json.Unmarshal(p.patchBytes, v); err != nil {
+		if err := json.Unmarshal(p.patchBytes, &v); err != nil {
 			return nil, errors.NewBadRequest(fmt.Sprintf("error decoding patch: %v", err))
 		}
 	}
@@ -365,7 +365,7 @@ func (p *jsonPatcher) applyJSPatch(versionedJS []byte) (patchedJS []byte, retErr
 	// TODO(liggitt): drop this once golang json parser limits stack depth (https://github.com/golang/go/issues/31789)
 	if len(p.patchBytes) > 1024*1024 {
 		v := map[string]interface{}{}
-		if err := json.Unmarshal(p.patchBytes, v); err != nil {
+		if err := json.Unmarshal(p.patchBytes, &v); err != nil {
 			return nil, errors.NewBadRequest(fmt.Sprintf("error decoding patch: %v", err))
 		}
 	}
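The one-character fix matters because encoding/json refuses to decode into a non-pointer destination, so the depth check here rejected every patch over the 1 MiB threshold with a decoding error even when the patch was valid. A minimal demonstration:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	data := []byte(`[{"op":"add","path":"/a","value":1}]`)

	v := []interface{}{}
	fmt.Println(json.Unmarshal(data, v))  // error: json: Unmarshal(non-pointer ...)
	fmt.Println(json.Unmarshal(data, &v)) // <nil>; v now holds the decoded patch
}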


@@ -285,10 +285,12 @@ func (s *WatchServer) HandleWS(ws *websocket.Conn) {
 	buf := &bytes.Buffer{}
 	streamBuf := &bytes.Buffer{}
 	ch := s.Watching.ResultChan()
+	defer s.Watching.Stop()

 	for {
 		select {
 		case <-done:
-			s.Watching.Stop()
 			return
 		case event, ok := <-ch:
 			if !ok {
@@ -317,25 +319,21 @@
 			if err != nil {
 				utilruntime.HandleError(fmt.Errorf("unable to convert watch object: %v", err))
 				// client disconnect.
-				s.Watching.Stop()
 				return
 			}
 			if err := s.Encoder.Encode(outEvent, streamBuf); err != nil {
 				// encoding error
 				utilruntime.HandleError(fmt.Errorf("unable to encode event: %v", err))
-				s.Watching.Stop()
 				return
 			}
 			if s.UseTextFraming {
 				if err := websocket.Message.Send(ws, streamBuf.String()); err != nil {
 					// Client disconnect.
-					s.Watching.Stop()
 					return
 				}
 			} else {
 				if err := websocket.Message.Send(ws, streamBuf.Bytes()); err != nil {
 					// Client disconnect.
-					s.Watching.Stop()
 					return
 				}
 			}
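The rewrite trades five per-branch Stop calls for a single defer, so every return path, including any added later, cleans up the watch. The shape of the change in miniature:

package main

import "fmt"

// serve stops the watch exactly once no matter which branch returns.
func serve(stop func(), events <-chan int, done <-chan struct{}) {
	defer stop() // runs on every return below
	for {
		select {
		case <-done:
			return
		case _, ok := <-events:
			if !ok {
				return // channel closed
			}
		}
	}
}

func main() {
	events := make(chan int)
	done := make(chan struct{})
	go close(done)
	serve(func() { fmt.Println("watch stopped") }, events, done)
}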


@@ -178,7 +178,7 @@ var (
 		},
 		[]string{"group", "version", "kind"},
 	)
-	// Because of volatality of the base metric this is pre-aggregated one. Instead of reporing current usage all the time
+	// Because of volatility of the base metric this is pre-aggregated one. Instead of reporting current usage all the time
 	// it reports maximal usage during the last second.
 	currentInflightRequests = compbasemetrics.NewGaugeVec(
 		&compbasemetrics.GaugeOpts{
@@ -188,6 +188,15 @@
 		},
 		[]string{"requestKind"},
 	)
+	requestTerminationsTotal = compbasemetrics.NewCounterVec(
+		&compbasemetrics.CounterOpts{
+			Name:           "apiserver_request_terminations_total",
+			Help:           "Number of requests which apiserver terminated in self-defense.",
+			StabilityLevel: compbasemetrics.ALPHA,
+		},
+		[]string{"verb", "group", "version", "resource", "subresource", "scope", "component", "code"},
+	)

 	kubectlExeRegexp = regexp.MustCompile(`^.*((?i:kubectl\.exe))`)

 	metrics = []resettableCollector{
@@ -204,6 +213,7 @@
 		WatchEvents,
 		WatchEventsSizes,
 		currentInflightRequests,
+		requestTerminationsTotal,
 	}
 )
@@ -237,10 +247,11 @@ func UpdateInflightRequestMetrics(nonmutating, mutating int) {
 	currentInflightRequests.WithLabelValues(MutatingKind).Set(float64(mutating))
 }

-// Record records a single request to the standard metrics endpoints. For use by handlers that perform their own
-// processing. All API paths should use InstrumentRouteFunc implicitly. Use this instead of MonitorRequest if
-// you already have a RequestInfo object.
-func Record(req *http.Request, requestInfo *request.RequestInfo, component, contentType string, code int, responseSizeInBytes int, elapsed time.Duration) {
+// RecordRequestTermination records that the request was terminated early as part of a resource
+// preservation or apiserver self-defense mechanism (e.g. timeouts, maxinflight throttling,
+// proxyHandler errors). RecordRequestTermination should only be called zero or one times
+// per request.
+func RecordRequestTermination(req *http.Request, requestInfo *request.RequestInfo, component string, code int) {
 	if requestInfo == nil {
 		requestInfo = &request.RequestInfo{Verb: req.Method, Path: req.URL.Path}
 	}
@@ -252,9 +263,9 @@
 	// However, we need to tweak it e.g. to differentiate GET from LIST.
 	verb := canonicalVerb(strings.ToUpper(req.Method), scope)
 	if requestInfo.IsResourceRequest {
-		MonitorRequest(req, verb, requestInfo.APIGroup, requestInfo.APIVersion, requestInfo.Resource, requestInfo.Subresource, scope, component, contentType, code, responseSizeInBytes, elapsed)
+		requestTerminationsTotal.WithLabelValues(cleanVerb(verb, req), requestInfo.APIGroup, requestInfo.APIVersion, requestInfo.Resource, requestInfo.Subresource, scope, component, codeToString(code)).Inc()
 	} else {
-		MonitorRequest(req, verb, "", "", "", requestInfo.Path, scope, component, contentType, code, responseSizeInBytes, elapsed)
+		requestTerminationsTotal.WithLabelValues(cleanVerb(verb, req), "", "", "", requestInfo.Path, scope, component, codeToString(code)).Inc()
 	}
 }
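Terminated requests now increment a dedicated counter instead of flowing into the general request metrics with zeroed content type, size, and duration. A hedged sketch of the same shape, using the plain Prometheus client for brevity rather than the component-base wrappers the file actually uses:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

var requestTerminations = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "apiserver_request_terminations_total",
		Help: "Number of requests which apiserver terminated in self-defense.",
	},
	[]string{"verb", "group", "version", "resource", "subresource", "scope", "component", "code"},
)

func main() {
	prometheus.MustRegister(requestTerminations)
	// non-resource requests leave group/version/resource empty, as in the diff
	requestTerminations.WithLabelValues("GET", "", "", "", "/healthz", "", "apiserver", "504").Inc()
	fmt.Println("recorded one termination")
}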


@@ -178,7 +178,7 @@ func WithMaxInFlightLimit(
 					}
 				}
 			}
-			metrics.Record(r, requestInfo, metrics.APIServerComponent, "", http.StatusTooManyRequests, 0, 0)
+			metrics.RecordRequestTermination(r, requestInfo, metrics.APIServerComponent, http.StatusTooManyRequests)
 			tooManyRequests(r, w)
 		}
 	}


@@ -59,7 +59,7 @@ func WithTimeoutForNonLongRunningRequests(handler http.Handler, longRunning apir
 		postTimeoutFn := func() {
 			cancel()
-			metrics.Record(req, requestInfo, metrics.APIServerComponent, "", http.StatusGatewayTimeout, 0, 0)
+			metrics.RecordRequestTermination(req, requestInfo, metrics.APIServerComponent, http.StatusGatewayTimeout)
 		}
 		return req, time.After(timeout), postTimeoutFn, apierrors.NewTimeoutError(fmt.Sprintf("request did not complete within %s", timeout), 0)
 	}


@@ -3,8 +3,8 @@ package version
 var (
 	gitMajor = "1"
 	gitMinor = "16"
-	gitVersion = "v1.16.2-k3s.1"
+	gitVersion = "v1.16.3-k3s.1"
-	gitCommit = "b8b17ba55f20e590df507fce333dfee13ab438c6"
+	gitCommit = "bc4447825d97497c0b3ab4aa70da9b87f117094d"
 	gitTreeState = "clean"
-	buildDate = "2019-10-16T05:17Z"
+	buildDate = "2019-11-14T17:48:31Z"
 )


@@ -295,13 +295,6 @@ func isDeletionDup(a, b *Delta) *Delta {
 	return b
 }

-// willObjectBeDeletedLocked returns true only if the last delta for the
-// given object is Delete. Caller must lock first.
-func (f *DeltaFIFO) willObjectBeDeletedLocked(id string) bool {
-	deltas := f.items[id]
-	return len(deltas) > 0 && deltas[len(deltas)-1].Type == Deleted
-}
-
 // queueActionLocked appends to the delta list for the object.
 // Caller must lock first.
 func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) error {
@@ -310,13 +303,6 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err
 		return KeyError{obj, err}
 	}

-	// If object is supposed to be deleted (last event is Deleted),
-	// then we should ignore Sync events, because it would result in
-	// recreation of this object.
-	if actionType == Sync && f.willObjectBeDeletedLocked(id) {
-		return nil
-	}
-
 	newDeltas := append(f.items[id], Delta{actionType, obj})
 	newDeltas = dedupDeltas(newDeltas)


@@ -129,6 +129,25 @@ type EventBroadcaster interface {
 	NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder
 }

+// EventRecorderAdapter is a wrapper around EventRecorder implementing the
+// new EventRecorder interface.
+type EventRecorderAdapter struct {
+	recorder EventRecorder
+}
+
+// NewEventRecorderAdapter returns an adapter implementing new EventRecorder
+// interface.
+func NewEventRecorderAdapter(recorder EventRecorder) *EventRecorderAdapter {
+	return &EventRecorderAdapter{
+		recorder: recorder,
+	}
+}
+
+// Eventf is a wrapper around v1 Eventf
+func (a *EventRecorderAdapter) Eventf(regarding, _ runtime.Object, eventtype, reason, action, note string, args ...interface{}) {
+	a.recorder.Eventf(regarding, eventtype, reason, note, args...)
+}
+
 // Creates a new event broadcaster.
 func NewBroadcaster() EventBroadcaster {
 	return &eventBroadcasterImpl{
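The adapter lets code that only has a legacy record.EventRecorder satisfy the newer events-style recorder interface (the related object and action arguments are intentionally dropped). A hedged usage sketch:

package main

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/record"
)

func main() {
	broadcaster := record.NewBroadcaster()
	legacy := broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "demo"})

	// adapted can now be passed where the new events.EventRecorder is expected
	adapted := record.NewEventRecorderAdapter(legacy)
	adapted.Eventf(&corev1.Pod{}, nil, corev1.EventTypeNormal, "Started", "Starting", "demo event %d", 1)
}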


@@ -3,8 +3,8 @@ package version
 var (
 	gitMajor = "1"
 	gitMinor = "16"
-	gitVersion = "v1.16.2-k3s.1"
+	gitVersion = "v1.16.3-k3s.1"
-	gitCommit = "b8b17ba55f20e590df507fce333dfee13ab438c6"
+	gitCommit = "bc4447825d97497c0b3ab4aa70da9b87f117094d"
 	gitTreeState = "clean"
-	buildDate = "2019-10-16T05:17Z"
+	buildDate = "2019-11-14T17:48:31Z"
 )


@@ -100,7 +100,7 @@ func proxyError(w http.ResponseWriter, req *http.Request, error string, code int
 		return
 	}
 	// TODO: record long-running request differently? The long-running check func does not necessarily match the one of the aggregated apiserver
-	endpointmetrics.Record(req, info, aggregatorComponent, "", code, 0, 0)
+	endpointmetrics.RecordRequestTermination(req, info, aggregatorComponent, code)
 }

 func (r *proxyHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {


@@ -3,8 +3,8 @@ package version
 var (
 	gitMajor = "1"
 	gitMinor = "16"
-	gitVersion = "v1.16.2-k3s.1"
+	gitVersion = "v1.16.3-k3s.1"
-	gitCommit = "b8b17ba55f20e590df507fce333dfee13ab438c6"
+	gitCommit = "bc4447825d97497c0b3ab4aa70da9b87f117094d"
 	gitTreeState = "clean"
-	buildDate = "2019-10-16T05:17Z"
+	buildDate = "2019-11-14T17:48:31Z"
 )


@@ -1,14 +1,10 @@
-package(default_visibility = ["//visibility:public"])
-
-load(
-    "@io_bazel_rules_go//go:def.bzl",
-    "go_library",
-)
+load("@io_bazel_rules_go//go:def.bzl", "go_library")

 go_library(
     name = "go_default_library",
     srcs = ["server.go"],
     importpath = "k8s.io/kubernetes/cmd/kube-scheduler/app",
+    visibility = ["//visibility:public"],
     deps = [
         "//cmd/kube-scheduler/app/config:go_default_library",
         "//cmd/kube-scheduler/app/options:go_default_library",
@@ -22,6 +18,8 @@ go_library(
         "//pkg/util/flag:go_default_library",
         "//pkg/version:go_default_library",
         "//pkg/version/verflag:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/api/events/v1beta1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
@@ -32,8 +30,11 @@ go_library(
         "//staging/src/k8s.io/apiserver/pkg/server/mux:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/server/routes:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/util/term:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/events:go_default_library",
         "//staging/src/k8s.io/client-go/tools/leaderelection:go_default_library",
+        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
         "//staging/src/k8s.io/component-base/cli/flag:go_default_library",
         "//staging/src/k8s.io/component-base/cli/globalflag:go_default_library",
         "//staging/src/k8s.io/component-base/metrics/legacyregistry:go_default_library",
@@ -58,4 +59,5 @@ filegroup(
         "//cmd/kube-scheduler/app/testing:all-srcs",
     ],
     tags = ["automanaged"],
+    visibility = ["//visibility:public"],
 )


@@ -47,12 +47,12 @@ type Config struct {
 	Client clientset.Interface
 	InformerFactory informers.SharedInformerFactory
 	PodInformer coreinformers.PodInformer
-	EventClient v1beta1.EventsGetter

 	// TODO: Remove the following after fully migrating to the new events api.
 	CoreEventClient v1core.EventsGetter
-	LeaderElectionBroadcaster record.EventBroadcaster
+	CoreBroadcaster record.EventBroadcaster

+	EventClient v1beta1.EventsGetter
 	Recorder    events.EventRecorder
 	Broadcaster events.EventBroadcaster


@@ -32,7 +32,6 @@ go_library(
         "//staging/src/k8s.io/client-go/rest:go_default_library",
         "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
         "//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library",
-        "//staging/src/k8s.io/client-go/tools/events:go_default_library",
         "//staging/src/k8s.io/client-go/tools/leaderelection:go_default_library",
         "//staging/src/k8s.io/client-go/tools/leaderelection/resourcelock:go_default_library",
         "//staging/src/k8s.io/client-go/tools/record:go_default_library",


@@ -34,7 +34,6 @@ import (
 	restclient "k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
 	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
-	"k8s.io/client-go/tools/events"
 	"k8s.io/client-go/tools/leaderelection"
 	"k8s.io/client-go/tools/leaderelection/resourcelock"
 	"k8s.io/client-go/tools/record"
@@ -236,16 +235,13 @@ func (o *Options) Config() (*schedulerappconfig.Config, error) {
 		return nil, err
 	}

-	// Prepare event clients.
-	eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: eventClient.EventsV1beta1().Events("")})
-	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, c.ComponentConfig.SchedulerName)
-
-	leaderElectionBroadcaster := record.NewBroadcaster()
-	leaderElectionRecorder := leaderElectionBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: c.ComponentConfig.SchedulerName})
+	coreBroadcaster := record.NewBroadcaster()
+	coreRecorder := coreBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: c.ComponentConfig.SchedulerName})

 	// Set up leader election if enabled.
 	var leaderElectionConfig *leaderelection.LeaderElectionConfig
 	if c.ComponentConfig.LeaderElection.LeaderElect {
-		leaderElectionConfig, err = makeLeaderElectionConfig(c.ComponentConfig.LeaderElection, leaderElectionClient, leaderElectionRecorder)
+		leaderElectionConfig, err = makeLeaderElectionConfig(c.ComponentConfig.LeaderElection, leaderElectionClient, coreRecorder)
 		if err != nil {
 			return nil, err
 		}
@@ -256,9 +252,7 @@
 	c.PodInformer = factory.NewPodInformer(client, 0)
 	c.EventClient = eventClient.EventsV1beta1()
 	c.CoreEventClient = eventClient.CoreV1()
-	c.Recorder = recorder
-	c.Broadcaster = eventBroadcaster
-	c.LeaderElectionBroadcaster = leaderElectionBroadcaster
+	c.CoreBroadcaster = coreBroadcaster
 	c.LeaderElection = leaderElectionConfig

 	return c, nil


@@ -27,6 +27,8 @@ import (
 	"github.com/spf13/cobra"

+	"k8s.io/api/core/v1"
+	eventsv1beta1 "k8s.io/api/events/v1beta1"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apiserver/pkg/authentication/authenticator"
 	"k8s.io/apiserver/pkg/authorization/authorizer"
@@ -37,8 +39,11 @@ import (
 	"k8s.io/apiserver/pkg/server/mux"
 	"k8s.io/apiserver/pkg/server/routes"
 	"k8s.io/apiserver/pkg/util/term"
+	"k8s.io/client-go/kubernetes/scheme"
 	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
+	"k8s.io/client-go/tools/events"
 	"k8s.io/client-go/tools/leaderelection"
+	"k8s.io/client-go/tools/record"
 	cliflag "k8s.io/component-base/cli/flag"
 	"k8s.io/component-base/cli/globalflag"
 	"k8s.io/component-base/metrics/legacyregistry"
@@ -168,6 +173,15 @@ func Run(cc schedulerserverconfig.CompletedConfig, stopCh <-chan struct{}, regis
 		}
 	}

+	// Prepare event clients.
+	if _, err := cc.Client.Discovery().ServerResourcesForGroupVersion(eventsv1beta1.SchemeGroupVersion.String()); err == nil {
+		cc.Broadcaster = events.NewBroadcaster(&events.EventSinkImpl{Interface: cc.EventClient.Events("")})
+		cc.Recorder = cc.Broadcaster.NewRecorder(scheme.Scheme, cc.ComponentConfig.SchedulerName)
+	} else {
+		recorder := cc.CoreBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: cc.ComponentConfig.SchedulerName})
+		cc.Recorder = record.NewEventRecorderAdapter(recorder)
+	}
+
 	// Create the scheduler.
 	sched, err := scheduler.New(cc.Client,
 		cc.InformerFactory.Core().V1().Nodes(),
@@ -200,8 +214,8 @@
 	if cc.Broadcaster != nil && cc.EventClient != nil {
 		cc.Broadcaster.StartRecordingToSink(stopCh)
 	}
-	if cc.LeaderElectionBroadcaster != nil && cc.CoreEventClient != nil {
-		cc.LeaderElectionBroadcaster.StartRecordingToSink(&corev1.EventSinkImpl{Interface: cc.CoreEventClient.Events("")})
+	if cc.CoreBroadcaster != nil && cc.CoreEventClient != nil {
+		cc.CoreBroadcaster.StartRecordingToSink(&corev1.EventSinkImpl{Interface: cc.CoreEventClient.Events("")})
 	}

 	// Setup healthz checks.
 	var checks []healthz.HealthChecker
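The scheduler now probes discovery before committing to the events.k8s.io/v1beta1 API, falling back to core-v1 events through the adapter when the group is not served. The probe in isolation (function name is illustrative):

package schedulerprobe

import (
	eventsv1beta1 "k8s.io/api/events/v1beta1"
	"k8s.io/client-go/kubernetes"
)

// newEventsAPIAvailable reports whether the cluster serves events.k8s.io/v1beta1.
func newEventsAPIAvailable(client kubernetes.Interface) bool {
	_, err := client.Discovery().ServerResourcesForGroupVersion(eventsv1beta1.SchemeGroupVersion.String())
	return err == nil
}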


@ -21,7 +21,7 @@ import (
"time" "time"
apps "k8s.io/api/apps/v1beta1" apps "k8s.io/api/apps/v1beta1"
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1" "k8s.io/api/extensions/v1beta1"
policy "k8s.io/api/policy/v1beta1" policy "k8s.io/api/policy/v1beta1"
apiequality "k8s.io/apimachinery/pkg/api/equality" apiequality "k8s.io/apimachinery/pkg/api/equality"
@ -363,7 +363,19 @@ func (dc *DisruptionController) updateDb(old, cur interface{}) {
} }
func (dc *DisruptionController) removeDb(obj interface{}) { func (dc *DisruptionController) removeDb(obj interface{}) {
pdb := obj.(*policy.PodDisruptionBudget) pdb, ok := obj.(*policy.PodDisruptionBudget)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
klog.Errorf("Couldn't get object from tombstone %+v", obj)
return
}
pdb, ok = tombstone.Obj.(*policy.PodDisruptionBudget)
if !ok {
klog.Errorf("Tombstone contained object that is not a pdb %+v", obj)
return
}
}
klog.V(4).Infof("remove DB %q", pdb.Name) klog.V(4).Infof("remove DB %q", pdb.Name)
dc.enqueuePdb(pdb) dc.enqueuePdb(pdb)
} }
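The type-assertion fallback added to removeDb is the standard informer delete-handler pattern: when the watch stream misses a delete, the shared informer delivers a cache.DeletedFinalStateUnknown tombstone carrying the object's last known state instead of the object itself. A generic sketch of the pattern (the widget type and handler name are illustrative):

package sketch

import (
	"k8s.io/client-go/tools/cache"
	"k8s.io/klog"
)

// widget stands in for whatever type the informer watches.
type widget struct{ name string }

func handleDelete(obj interface{}) {
	w, ok := obj.(*widget)
	if !ok {
		// The watch missed the delete; unwrap the tombstone that holds
		// the object's last known state.
		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
		if !ok {
			klog.Errorf("couldn't get object from tombstone %+v", obj)
			return
		}
		w, ok = tombstone.Obj.(*widget)
		if !ok {
			klog.Errorf("tombstone contained object that is not a widget %+v", obj)
			return
		}
	}
	_ = w // enqueue w for processing here
}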


@ -266,11 +266,7 @@ func checkVolumeSatisfyClaim(volume *v1.PersistentVolume, claim *v1.PersistentVo
return fmt.Errorf("storageClassName does not match") return fmt.Errorf("storageClassName does not match")
} }
isMismatch, err := pvutil.CheckVolumeModeMismatches(&claim.Spec, &volume.Spec) if pvutil.CheckVolumeModeMismatches(&claim.Spec, &volume.Spec) {
if err != nil {
return fmt.Errorf("error checking volumeMode: %v", err)
}
if isMismatch {
return fmt.Errorf("incompatible volumeMode") return fmt.Errorf("incompatible volumeMode")
} }
@ -587,7 +583,7 @@ func (ctrl *PersistentVolumeController) syncVolume(volume *v1.PersistentVolume)
} }
return nil return nil
} else if claim.Spec.VolumeName == "" { } else if claim.Spec.VolumeName == "" {
if isMismatch, err := pvutil.CheckVolumeModeMismatches(&claim.Spec, &volume.Spec); err != nil || isMismatch { if pvutil.CheckVolumeModeMismatches(&claim.Spec, &volume.Spec) {
// Binding for the volume won't be called in syncUnboundClaim, // Binding for the volume won't be called in syncUnboundClaim,
// because findBestMatchForClaim won't return the volume due to volumeMode mismatch. // because findBestMatchForClaim won't return the volume due to volumeMode mismatch.
volumeMsg := fmt.Sprintf("Cannot bind PersistentVolume to requested PersistentVolumeClaim %q due to incompatible volumeMode.", claim.Name) volumeMsg := fmt.Sprintf("Cannot bind PersistentVolume to requested PersistentVolumeClaim %q due to incompatible volumeMode.", claim.Name)


@ -19,7 +19,7 @@ package persistentvolume
import ( import (
"fmt" "fmt"
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1" storage "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -203,13 +203,8 @@ func FindMatchingVolume(
volumeQty := volume.Spec.Capacity[v1.ResourceStorage] volumeQty := volume.Spec.Capacity[v1.ResourceStorage]
// check if volumeModes do not match (feature gate protected)
isMismatch, err := CheckVolumeModeMismatches(&claim.Spec, &volume.Spec)
if err != nil {
return nil, fmt.Errorf("error checking if volumeMode was a mismatch: %v", err)
}
// filter out mismatching volumeModes // filter out mismatching volumeModes
if isMismatch { if CheckVolumeModeMismatches(&claim.Spec, &volume.Spec) {
continue continue
} }
@ -305,9 +300,22 @@ func FindMatchingVolume(
// CheckVolumeModeMismatches is a convenience method that checks volumeMode for PersistentVolume // CheckVolumeModeMismatches is a convenience method that checks volumeMode for PersistentVolume
// and PersistentVolumeClaims // and PersistentVolumeClaims
func CheckVolumeModeMismatches(pvcSpec *v1.PersistentVolumeClaimSpec, pvSpec *v1.PersistentVolumeSpec) (bool, error) { func CheckVolumeModeMismatches(pvcSpec *v1.PersistentVolumeClaimSpec, pvSpec *v1.PersistentVolumeSpec) bool {
if !utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { if !utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
return false, nil if pvcSpec.VolumeMode != nil && *pvcSpec.VolumeMode == v1.PersistentVolumeBlock {
// Block PVC does not match anything when the feature is off. We explicitly want
// to prevent binding block PVC to filesystem PV.
// The PVC should be ignored by PV controller.
return true
}
if pvSpec.VolumeMode != nil && *pvSpec.VolumeMode == v1.PersistentVolumeBlock {
// Block PV does not match anything when the feature is off. We explicitly want
// to prevent binding block PV to filesystem PVC.
// The PV should be ignored by PV controller.
return true
}
// Both PV + PVC are not block.
return false
} }
// In HA upgrades, we cannot guarantee that the apiserver is on a version >= controller-manager. // In HA upgrades, we cannot guarantee that the apiserver is on a version >= controller-manager.
@ -320,7 +328,7 @@ func CheckVolumeModeMismatches(pvcSpec *v1.PersistentVolumeClaimSpec, pvSpec *v1
if pvSpec.VolumeMode != nil { if pvSpec.VolumeMode != nil {
pvVolumeMode = *pvSpec.VolumeMode pvVolumeMode = *pvSpec.VolumeMode
} }
return requestedVolumeMode != pvVolumeMode, nil return requestedVolumeMode != pvVolumeMode
} }
// CheckAccessModes returns true if PV satisfies all the PVC's requested AccessModes // CheckAccessModes returns true if PV satisfies all the PVC's requested AccessModes
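The signature change above also carries a semantic fix: with the BlockVolume feature gate disabled, the old code reported no mismatch for everything, so a block PVC could be bound to a filesystem PV; the new code refuses to match anything that explicitly requests Block. A toy restatement of the gate-off branch (k8s.io/api/core/v1 types):

package sketch

import v1 "k8s.io/api/core/v1"

// mismatchWhenGateOff mirrors the gate-off logic above: any side that
// explicitly asks for Block never matches; unset modes mean Filesystem.
func mismatchWhenGateOff(pvcMode, pvMode *v1.PersistentVolumeMode) bool {
	if pvcMode != nil && *pvcMode == v1.PersistentVolumeBlock {
		return true // block PVC must not bind to a filesystem PV
	}
	if pvMode != nil && *pvMode == v1.PersistentVolumeBlock {
		return true // block PV must not bind to a filesystem PVC
	}
	return false // both sides are (implicitly or explicitly) filesystem
}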


@ -54,11 +54,6 @@ func newPerfCounter(counter string) (*perfCounter, error) {
return nil, errors.New("unable to open query through DLL call") return nil, errors.New("unable to open query through DLL call")
} }
ret = win_pdh.PdhValidatePath(counter)
if ret != win_pdh.ERROR_SUCCESS {
return nil, fmt.Errorf("unable to valid path to counter. Error code is %x", ret)
}
ret = win_pdh.PdhAddEnglishCounter(queryHandle, counter, 0, &counterHandle) ret = win_pdh.PdhAddEnglishCounter(queryHandle, counter, 0, &counterHandle)
if ret != win_pdh.ERROR_SUCCESS { if ret != win_pdh.ERROR_SUCCESS {
return nil, fmt.Errorf("unable to add process counter. Error code is %x", ret) return nil, fmt.Errorf("unable to add process counter. Error code is %x", ret)
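A plausible reading of this removal (an inference; the diff carries no rationale): PdhValidatePath validates a counter path against the local, possibly localized counter names, so it can reject valid English paths on non-English Windows installs, whereas PdhAddEnglishCounter resolves English names on its own. The surviving sequence, sketched against the win_pdh bindings used above (assuming the github.com/JeffAshton/win_pdh package vendored by Kubernetes; treat the handle types as that package's):

package sketch

import (
	"fmt"

	win_pdh "github.com/JeffAshton/win_pdh"
)

// addEnglishCounter sketches the remaining flow: add the counter by its
// English name directly, with no PdhValidatePath step in between.
func addEnglishCounter(queryHandle win_pdh.PDH_HQUERY, counter string) (win_pdh.PDH_HCOUNTER, error) {
	var counterHandle win_pdh.PDH_HCOUNTER
	if ret := win_pdh.PdhAddEnglishCounter(queryHandle, counter, 0, &counterHandle); ret != win_pdh.ERROR_SUCCESS {
		return counterHandle, fmt.Errorf("unable to add process counter. Error code is %x", ret)
	}
	return counterHandle, nil
}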


@ -3,8 +3,8 @@ package version
var ( var (
gitMajor = "1" gitMajor = "1"
gitMinor = "16" gitMinor = "16"
gitVersion = "v1.16.2-k3s.1" gitVersion = "v1.16.3-k3s.1"
gitCommit = "b8b17ba55f20e590df507fce333dfee13ab438c6" gitCommit = "bc4447825d97497c0b3ab4aa70da9b87f117094d"
gitTreeState = "clean" gitTreeState = "clean"
buildDate = "2019-10-16T05:17Z" buildDate = "2019-11-14T17:48:31Z"
) )


@ -445,13 +445,12 @@ func (c *csiAttacher) Detach(volumeName string, nodeName types.NodeName) error {
} }
klog.V(4).Info(log("detacher deleted ok VolumeAttachment.ID=%s", attachID)) klog.V(4).Info(log("detacher deleted ok VolumeAttachment.ID=%s", attachID))
return c.waitForVolumeDetachment(volID, attachID) return c.waitForVolumeDetachment(volID, attachID, csiTimeout)
} }
func (c *csiAttacher) waitForVolumeDetachment(volumeHandle, attachID string) error { func (c *csiAttacher) waitForVolumeDetachment(volumeHandle, attachID string, timeout time.Duration) error {
klog.V(4).Info(log("probing for updates from CSI driver for [attachment.ID=%v]", attachID)) klog.V(4).Info(log("probing for updates from CSI driver for [attachment.ID=%v]", attachID))
timeout := c.waitSleepTime * 10
timer := time.NewTimer(timeout) // TODO (vladimirvivien) investigate making this configurable timer := time.NewTimer(timeout) // TODO (vladimirvivien) investigate making this configurable
defer timer.Stop() defer timer.Stop()
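Threading timeout through as a parameter, instead of computing waitSleepTime*10 inside, makes the bound explicit at call sites (Detach passes csiTimeout above) and easier to test. The timer-bounded wait it relies on looks like this in the general case, a sketch with a hypothetical polling check, not the CSI watcher logic:

package sketch

import (
	"fmt"
	"time"
)

// waitUntil polls check every interval until it reports done or the
// timeout elapses, whichever comes first.
func waitUntil(check func() (bool, error), interval, timeout time.Duration) error {
	timer := time.NewTimer(timeout)
	defer timer.Stop()
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			done, err := check()
			if err != nil {
				return err
			}
			if done {
				return nil
			}
		case <-timer.C:
			return fmt.Errorf("timed out after %v", timeout)
		}
	}
}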


@ -384,6 +384,14 @@ func (detacher *gcePersistentDiskDetacher) Detach(volumeName string, nodeName ty
} }
func (detacher *gcePersistentDiskDetacher) UnmountDevice(deviceMountPath string) error { func (detacher *gcePersistentDiskDetacher) UnmountDevice(deviceMountPath string) error {
if runtime.GOOS == "windows" {
// Flush the data cache on Windows because it is not flushed automatically during device unmount
exec := detacher.host.GetExec(gcePersistentDiskPluginName)
err := volumeutil.WriteVolumeCache(deviceMountPath, exec)
if err != nil {
return err
}
}
return mount.CleanupMountPoint(deviceMountPath, detacher.host.GetMounter(gcePersistentDiskPluginName), false) return mount.CleanupMountPoint(deviceMountPath, detacher.host.GetMounter(gcePersistentDiskPluginName), false)
} }


@ -22,6 +22,7 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"reflect" "reflect"
"runtime"
"strings" "strings"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
@ -29,7 +30,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime" apiruntime "k8s.io/apimachinery/pkg/runtime"
utypes "k8s.io/apimachinery/pkg/types" utypes "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature" utilfeature "k8s.io/apiserver/pkg/util/feature"
@ -196,7 +197,7 @@ func LoadPodFromFile(filePath string) (*v1.Pod, error) {
pod := &v1.Pod{} pod := &v1.Pod{}
codec := legacyscheme.Codecs.UniversalDecoder() codec := legacyscheme.Codecs.UniversalDecoder()
if err := runtime.DecodeInto(codec, podDef, pod); err != nil { if err := apiruntime.DecodeInto(codec, podDef, pod); err != nil {
return nil, fmt.Errorf("failed decoding file: %v", err) return nil, fmt.Errorf("failed decoding file: %v", err)
} }
return pod, nil return pod, nil
@ -590,3 +591,18 @@ func HasMountRefs(mountPath string, mountRefs []string) bool {
} }
return false return false
} }
// WriteVolumeCache flushes disk data for the specified mount path
func WriteVolumeCache(deviceMountPath string, exec mount.Exec) error {
// If the runtime OS is Windows, execute the Write-VolumeCache PowerShell command on the disk
if runtime.GOOS == "windows" {
cmd := fmt.Sprintf("Get-Volume -FilePath %s | Write-Volumecache", deviceMountPath)
output, err := exec.Run("powershell", "/c", cmd)
klog.Infof("command (%q) executed: %v, output: %q", cmd, err, string(output))
if err != nil {
return fmt.Errorf("command (%q) failed: %v, output: %q", cmd, err, string(output))
}
}
// On Linux this is skipped, because unmount flushes disk data automatically
return nil
}
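This is the helper the GCE PD detacher change above calls: on Windows it pipes the volume backing the mount path into Write-VolumeCache to flush its write cache; on Linux it intentionally does nothing. A usage sketch from a device-unmount path ("example-plugin" and the host value are illustrative):

package sketch

import (
	"runtime"

	"k8s.io/kubernetes/pkg/util/mount"
	"k8s.io/kubernetes/pkg/volume"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

// flushThenUnmount sketches the call order used by the detacher: flush
// the Windows volume cache first, then clean up the mount point.
func flushThenUnmount(host volume.VolumeHost, deviceMountPath string) error {
	if runtime.GOOS == "windows" {
		exec := host.GetExec("example-plugin")
		if err := volumeutil.WriteVolumeCache(deviceMountPath, exec); err != nil {
			return err
		}
	}
	return mount.CleanupMountPoint(deviceMountPath, host.GetMounter("example-plugin"), false)
}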


@ -65,11 +65,11 @@ func (az *Cloud) Event(obj runtime.Object, eventtype, reason, message string) {
} }
// GetVirtualMachineWithRetry invokes az.getVirtualMachine with exponential backoff retry // GetVirtualMachineWithRetry invokes az.getVirtualMachine with exponential backoff retry
func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.VirtualMachine, error) { func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName, crt cacheReadType) (compute.VirtualMachine, error) {
var machine compute.VirtualMachine var machine compute.VirtualMachine
var retryErr error var retryErr error
err := wait.ExponentialBackoff(az.RequestBackoff(), func() (bool, error) { err := wait.ExponentialBackoff(az.RequestBackoff(), func() (bool, error) {
machine, retryErr = az.getVirtualMachine(name) machine, retryErr = az.getVirtualMachine(name, crt)
if retryErr == cloudprovider.InstanceNotFound { if retryErr == cloudprovider.InstanceNotFound {
return true, cloudprovider.InstanceNotFound return true, cloudprovider.InstanceNotFound
} }
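Note how the condition function distinguishes terminal from transient failures: returning (true, err) stops wait.ExponentialBackoff immediately (InstanceNotFound will not heal by retrying), while (false, nil) schedules another attempt. A condensed sketch with hypothetical backoff values:

package sketch

import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	cloudprovider "k8s.io/cloud-provider"
)

// getWithBackoff retries get with exponential backoff, stopping early on
// the terminal not-found error. The backoff values are illustrative.
func getWithBackoff(get func() error) error {
	backoff := wait.Backoff{Duration: time.Second, Factor: 2.0, Steps: 5}
	return wait.ExponentialBackoff(backoff, func() (bool, error) {
		err := get()
		if err == cloudprovider.InstanceNotFound {
			return true, err // terminal: a missing VM won't appear by retrying
		}
		if err != nil {
			return false, nil // transient: back off and try again
		}
		return true, nil // success
	})
}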


@ -26,6 +26,20 @@ import (
"k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/cache"
) )
// cacheReadType defines the read type for cache data
type cacheReadType int
const (
// cacheReadTypeDefault returns data from the cache if the entry has not expired;
// if the entry has expired, the data is refetched using the getter,
// saved in the cache, and then returned
cacheReadTypeDefault cacheReadType = iota
// cacheReadTypeUnsafe returns data from the cache whether the entry is
// active or expired. If the entry doesn't exist in the cache, the data is
// fetched using the getter, saved in the cache, and returned
cacheReadTypeUnsafe
)
// getFunc defines a getter function for timedCache. // getFunc defines a getter function for timedCache.
type getFunc func(key string) (interface{}, error) type getFunc func(key string) (interface{}, error)
@ -36,6 +50,8 @@ type cacheEntry struct {
// The lock to ensure not updating same entry simultaneously. // The lock to ensure not updating same entry simultaneously.
lock sync.Mutex lock sync.Mutex
// time when entry was fetched and created
createdOn time.Time
} }
// cacheKeyFunc defines the key function required in TTLStore. // cacheKeyFunc defines the key function required in TTLStore.
@ -48,6 +64,7 @@ type timedCache struct {
store cache.Store store cache.Store
lock sync.Mutex lock sync.Mutex
getter getFunc getter getFunc
ttl time.Duration
} }
// newTimedcache creates a new timedCache. // newTimedcache creates a new timedCache.
@ -58,7 +75,11 @@ func newTimedcache(ttl time.Duration, getter getFunc) (*timedCache, error) {
return &timedCache{ return &timedCache{
getter: getter, getter: getter,
store: cache.NewTTLStore(cacheKeyFunc, ttl), // switch to NewStore instead of NewTTLStore so that we can
// reuse entries for calls that are fine with reading expired/stale data.
// With NewTTLStore, entries are not returned once they have expired.
store: cache.NewStore(cacheKeyFunc),
ttl: ttl,
}, nil }, nil
} }
@ -69,19 +90,15 @@ func (t *timedCache) getInternal(key string) (*cacheEntry, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
// if entry exists, return the entry
if exists { if exists {
return entry.(*cacheEntry), nil return entry.(*cacheEntry), nil
} }
// Lock here so that if the entry doesn't exist, only one goroutine adds
// it, avoiding overwrites
t.lock.Lock() t.lock.Lock()
defer t.lock.Unlock() defer t.lock.Unlock()
entry, exists, err = t.store.GetByKey(key)
if err != nil {
return nil, err
}
if exists {
return entry.(*cacheEntry), nil
}
// Still not found, add new entry with nil data. // Still not found, add new entry with nil data.
// Note the data will be filled later by getter. // Note the data will be filled later by getter.
@ -94,26 +111,38 @@ func (t *timedCache) getInternal(key string) (*cacheEntry, error) {
} }
// Get returns the requested item by key. // Get returns the requested item by key.
func (t *timedCache) Get(key string) (interface{}, error) { func (t *timedCache) Get(key string, crt cacheReadType) (interface{}, error) {
entry, err := t.getInternal(key) entry, err := t.getInternal(key)
if err != nil { if err != nil {
return nil, err return nil, err
} }
// Data is still not cached yet, cache it by getter. entry.lock.Lock()
if entry.data == nil { defer entry.lock.Unlock()
entry.lock.Lock()
defer entry.lock.Unlock()
if entry.data == nil { // entry exists
data, err := t.getter(key) if entry.data != nil {
if err != nil { // allow unsafe read, so return data even if expired
return nil, err if crt == cacheReadTypeUnsafe {
} return entry.data, nil
}
entry.data = data // if cached data is not expired, return cached data
if time.Since(entry.createdOn) < t.ttl {
return entry.data, nil
} }
} }
// Data is not cached yet or cache data is expired, cache it by getter.
// entry is locked before getting to ensure concurrent gets don't result in
// multiple ARM calls.
data, err := t.getter(key)
if err != nil {
return nil, err
}
// set the data in cache and also set the last update time
// to now as the data was recently fetched
entry.data = data
entry.createdOn = time.Now().UTC()
return entry.data, nil return entry.data, nil
} }
@ -129,7 +158,8 @@ func (t *timedCache) Delete(key string) error {
// It is only used for testing. // It is only used for testing.
func (t *timedCache) Set(key string, data interface{}) { func (t *timedCache) Set(key string, data interface{}) {
t.store.Add(&cacheEntry{ t.store.Add(&cacheEntry{
key: key, key: key,
data: data, data: data,
createdOn: time.Now().UTC(),
}) })
} }
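The net effect: entries now live in a plain store, and freshness is judged at read time from createdOn plus the cache-level ttl, which is exactly what lets cacheReadTypeUnsafe return expired data instead of a miss. A same-package usage sketch with a hypothetical getter:

// fetchVMFromARM is a hypothetical getter standing in for a real ARM call.
func fetchVMFromARM(key string) (interface{}, error) { return "vm-" + key, nil }

func exampleTimedCacheUse() {
	c, _ := newTimedcache(time.Minute, fetchVMFromARM)

	// Default read: served from cache while fresh; refetched through the
	// getter once the one-minute TTL has passed.
	fresh, _ := c.Get("node-1", cacheReadTypeDefault)

	// Unsafe read: served from cache even past the TTL; only a true miss
	// (no entry at all) invokes the getter.
	stale, _ := c.Get("node-1", cacheReadTypeUnsafe)

	_, _ = fresh, stale
}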


@ -69,7 +69,7 @@ type controllerCommon struct {
} }
// getNodeVMSet gets the VMSet interface based on config.VMType and the real virtual machine type. // getNodeVMSet gets the VMSet interface based on config.VMType and the real virtual machine type.
func (c *controllerCommon) getNodeVMSet(nodeName types.NodeName) (VMSet, error) { func (c *controllerCommon) getNodeVMSet(nodeName types.NodeName, crt cacheReadType) (VMSet, error) {
// 1. vmType is standard, return cloud.vmSet directly. // 1. vmType is standard, return cloud.vmSet directly.
if c.cloud.VMType == vmTypeStandard { if c.cloud.VMType == vmTypeStandard {
return c.cloud.vmSet, nil return c.cloud.vmSet, nil
@ -82,7 +82,7 @@ func (c *controllerCommon) getNodeVMSet(nodeName types.NodeName) (VMSet, error)
} }
// 3. If the node is managed by availability set, then return ss.availabilitySet. // 3. If the node is managed by availability set, then return ss.availabilitySet.
managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName)) managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName), crt)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -124,14 +124,14 @@ func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI stri
} }
} }
vmset, err := c.getNodeVMSet(nodeName) vmset, err := c.getNodeVMSet(nodeName, cacheReadTypeUnsafe)
if err != nil { if err != nil {
return -1, err return -1, err
} }
instanceid, err := c.cloud.InstanceID(context.TODO(), nodeName) instanceid, err := c.cloud.InstanceID(context.TODO(), nodeName)
if err != nil { if err != nil {
klog.Warningf("failed to get azure instance id (%v)", err) klog.Warningf("failed to get azure instance id (%v) for node %s", err, nodeName)
return -1, fmt.Errorf("failed to get azure instance id for node %q (%v)", nodeName, err) return -1, fmt.Errorf("failed to get azure instance id for node %q (%v)", nodeName, err)
} }
@ -162,7 +162,7 @@ func (c *controllerCommon) DetachDisk(diskName, diskURI string, nodeName types.N
return fmt.Errorf("failed to get azure instance id for node %q (%v)", nodeName, err) return fmt.Errorf("failed to get azure instance id for node %q (%v)", nodeName, err)
} }
vmset, err := c.getNodeVMSet(nodeName) vmset, err := c.getNodeVMSet(nodeName, cacheReadTypeUnsafe)
if err != nil { if err != nil {
return err return err
} }
@ -197,18 +197,20 @@ func (c *controllerCommon) DetachDisk(diskName, diskURI string, nodeName types.N
} }
// getNodeDataDisks invokes vmSet interfaces to get data disks for the node. // getNodeDataDisks invokes vmSet interfaces to get data disks for the node.
func (c *controllerCommon) getNodeDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) { func (c *controllerCommon) getNodeDataDisks(nodeName types.NodeName, crt cacheReadType) ([]compute.DataDisk, error) {
vmset, err := c.getNodeVMSet(nodeName) vmset, err := c.getNodeVMSet(nodeName, crt)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return vmset.GetDataDisks(nodeName) return vmset.GetDataDisks(nodeName, crt)
} }
// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI. // GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI.
func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) { func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
disks, err := c.getNodeDataDisks(nodeName) // getNodeDataDisks needs to fetch cached data, or fresh data if the cache has expired,
// to ensure the LUN is based on the latest entry.
disks, err := c.getNodeDataDisks(nodeName, cacheReadTypeDefault)
if err != nil { if err != nil {
klog.Errorf("error of getting data disks for node %q: %v", nodeName, err) klog.Errorf("error of getting data disks for node %q: %v", nodeName, err)
return -1, err return -1, err
@ -228,7 +230,7 @@ func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.N
// GetNextDiskLun searches all vhd attachment on the host and find unused lun. Return -1 if all luns are used. // GetNextDiskLun searches all vhd attachment on the host and find unused lun. Return -1 if all luns are used.
func (c *controllerCommon) GetNextDiskLun(nodeName types.NodeName) (int32, error) { func (c *controllerCommon) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
disks, err := c.getNodeDataDisks(nodeName) disks, err := c.getNodeDataDisks(nodeName, cacheReadTypeDefault)
if err != nil { if err != nil {
klog.Errorf("error of getting data disks for node %q: %v", nodeName, err) klog.Errorf("error of getting data disks for node %q: %v", nodeName, err)
return -1, err return -1, err
@ -255,7 +257,11 @@ func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.N
attached[diskName] = false attached[diskName] = false
} }
disks, err := c.getNodeDataDisks(nodeName) // doing a stale read for getNodeDataDisks to ensure we don't call ARM
// on every reconcile call. The cache is invalidated after disk
// Attach/Detach, so the new entry is fetched and cached the first time the
// reconcile loop runs after that operation, reflecting the latest model.
disks, err := c.getNodeDataDisks(nodeName, cacheReadTypeUnsafe)
if err != nil { if err != nil {
if err == cloudprovider.InstanceNotFound { if err == cloudprovider.InstanceNotFound {
// if host doesn't exist, no need to detach // if host doesn't exist, no need to detach
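The read types above encode one policy: reads that feed mutation decisions (picking a LUN) use cacheReadTypeDefault so expired entries are refreshed, while hot reconcile-loop reads (DisksAreAttached) tolerate stale data because Attach/Detach invalidates the cache anyway. Restated as an illustrative helper, not code from the diff:

// readTypeFor picks cache freshness by how the result will be used.
func readTypeFor(feedsMutation bool) cacheReadType {
	if feedsMutation {
		// e.g. GetDiskLun/GetNextDiskLun: a stale disk list could pick an
		// occupied LUN, so honor the TTL.
		return cacheReadTypeDefault
	}
	// e.g. DisksAreAttached in the reconcile loop: stale data is fine,
	// since the cache is invalidated after every Attach/Detach.
	return cacheReadTypeUnsafe
}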


@ -31,7 +31,7 @@ import (
// AttachDisk attaches a vhd to vm // AttachDisk attaches a vhd to vm
// the vhd must exist, can be identified by diskName, diskURI, and lun. // the vhd must exist, can be identified by diskName, diskURI, and lun.
func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error { func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
vm, err := as.getVirtualMachine(nodeName) vm, err := as.getVirtualMachine(nodeName, cacheReadTypeDefault)
if err != nil { if err != nil {
return err return err
} }
@ -102,7 +102,7 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri
// DetachDisk detaches a disk from host // DetachDisk detaches a disk from host
// the vhd can be identified by diskName or diskURI // the vhd can be identified by diskName or diskURI
func (as *availabilitySet) DetachDisk(diskName, diskURI string, nodeName types.NodeName) (*http.Response, error) { func (as *availabilitySet) DetachDisk(diskName, diskURI string, nodeName types.NodeName) (*http.Response, error) {
vm, err := as.getVirtualMachine(nodeName) vm, err := as.getVirtualMachine(nodeName, cacheReadTypeDefault)
if err != nil { if err != nil {
// if host doesn't exist, no need to detach // if host doesn't exist, no need to detach
klog.Warningf("azureDisk - cannot find node %s, skip detaching disk(%s, %s)", nodeName, diskName, diskURI) klog.Warningf("azureDisk - cannot find node %s, skip detaching disk(%s, %s)", nodeName, diskName, diskURI)
@ -155,8 +155,8 @@ func (as *availabilitySet) DetachDisk(diskName, diskURI string, nodeName types.N
} }
// GetDataDisks gets a list of data disks attached to the node. // GetDataDisks gets a list of data disks attached to the node.
func (as *availabilitySet) GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) { func (as *availabilitySet) GetDataDisks(nodeName types.NodeName, crt cacheReadType) ([]compute.DataDisk, error) {
vm, err := as.getVirtualMachine(nodeName) vm, err := as.getVirtualMachine(nodeName, crt)
if err != nil { if err != nil {
return nil, err return nil, err
} }


@ -32,7 +32,7 @@ import (
// the vhd must exist, can be identified by diskName, diskURI, and lun. // the vhd must exist, can be identified by diskName, diskURI, and lun.
func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error { func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
vmName := mapNodeNameToVMName(nodeName) vmName := mapNodeNameToVMName(nodeName)
ssName, instanceID, vm, err := ss.getVmssVM(vmName) ssName, instanceID, vm, err := ss.getVmssVM(vmName, cacheReadTypeDefault)
if err != nil { if err != nil {
return err return err
} }
@ -109,7 +109,7 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod
// the vhd can be identified by diskName or diskURI // the vhd can be identified by diskName or diskURI
func (ss *scaleSet) DetachDisk(diskName, diskURI string, nodeName types.NodeName) (*http.Response, error) { func (ss *scaleSet) DetachDisk(diskName, diskURI string, nodeName types.NodeName) (*http.Response, error) {
vmName := mapNodeNameToVMName(nodeName) vmName := mapNodeNameToVMName(nodeName)
ssName, instanceID, vm, err := ss.getVmssVM(vmName) ssName, instanceID, vm, err := ss.getVmssVM(vmName, cacheReadTypeDefault)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -167,8 +167,8 @@ func (ss *scaleSet) DetachDisk(diskName, diskURI string, nodeName types.NodeName
} }
// GetDataDisks gets a list of data disks attached to the node. // GetDataDisks gets a list of data disks attached to the node.
func (ss *scaleSet) GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) { func (ss *scaleSet) GetDataDisks(nodeName types.NodeName, crt cacheReadType) ([]compute.DataDisk, error) {
_, _, vm, err := ss.getVmssVM(string(nodeName)) _, _, vm, err := ss.getVmssVM(string(nodeName), crt)
if err != nil { if err != nil {
return nil, err return nil, err
} }


@ -959,7 +959,7 @@ func (f *fakeVMSet) DetachDisk(diskName, diskURI string, nodeName types.NodeName
return nil, fmt.Errorf("unimplemented") return nil, fmt.Errorf("unimplemented")
} }
func (f *fakeVMSet) GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) { func (f *fakeVMSet) GetDataDisks(nodeName types.NodeName, crt cacheReadType) ([]compute.DataDisk, error) {
return nil, fmt.Errorf("unimplemented") return nil, fmt.Errorf("unimplemented")
} }


@ -144,8 +144,9 @@ func (ims *InstanceMetadataService) getInstanceMetadata(key string) (interface{}
} }
// GetMetadata gets instance metadata from cache. // GetMetadata gets instance metadata from cache.
func (ims *InstanceMetadataService) GetMetadata() (*InstanceMetadata, error) { // crt determines whether data may be served from the stale cache, or must be refetched once the cache has expired.
cache, err := ims.imsCache.Get(metadataCacheKey) func (ims *InstanceMetadataService) GetMetadata(crt cacheReadType) (*InstanceMetadata, error) {
cache, err := ims.imsCache.Get(metadataCacheKey, crt)
if err != nil { if err != nil {
return nil, err return nil, err
} }


@ -73,7 +73,7 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N
} }
if az.UseInstanceMetadata { if az.UseInstanceMetadata {
metadata, err := az.metadata.GetMetadata() metadata, err := az.metadata.GetMetadata(cacheReadTypeUnsafe)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -259,7 +259,7 @@ func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, e
} }
if az.UseInstanceMetadata { if az.UseInstanceMetadata {
metadata, err := az.metadata.GetMetadata() metadata, err := az.metadata.GetMetadata(cacheReadTypeUnsafe)
if err != nil { if err != nil {
return "", err return "", err
} }
@ -346,7 +346,7 @@ func (az *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string,
} }
if az.UseInstanceMetadata { if az.UseInstanceMetadata {
metadata, err := az.metadata.GetMetadata() metadata, err := az.metadata.GetMetadata(cacheReadTypeUnsafe)
if err != nil { if err != nil {
return "", err return "", err
} }


@ -962,7 +962,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
if isInternal { if isInternal {
// Refresh updated lb which will be used later in other places. // Refresh updated lb which will be used later in other places.
newLB, exist, err := az.getAzureLoadBalancer(lbName) newLB, exist, err := az.getAzureLoadBalancer(lbName, cacheReadTypeDefault)
if err != nil { if err != nil {
klog.V(2).Infof("reconcileLoadBalancer for service(%s): getAzureLoadBalancer(%s) failed: %v", serviceName, lbName, err) klog.V(2).Infof("reconcileLoadBalancer for service(%s): getAzureLoadBalancer(%s) failed: %v", serviceName, lbName, err)
return nil, err return nil, err
@ -1125,7 +1125,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
ports = []v1.ServicePort{} ports = []v1.ServicePort{}
} }
sg, err := az.getSecurityGroup() sg, err := az.getSecurityGroup(cacheReadTypeDefault)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -1466,7 +1466,7 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, lbNa
} }
if lbName != "" { if lbName != "" {
loadBalancer, _, err := az.getAzureLoadBalancer(lbName) loadBalancer, _, err := az.getAzureLoadBalancer(lbName, cacheReadTypeDefault)
if err != nil { if err != nil {
return nil, err return nil, err
} }


@ -46,7 +46,7 @@ const (
// ListRoutes lists all managed routes that belong to the specified clusterName // ListRoutes lists all managed routes that belong to the specified clusterName
func (az *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) { func (az *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) {
klog.V(10).Infof("ListRoutes: START clusterName=%q", clusterName) klog.V(10).Infof("ListRoutes: START clusterName=%q", clusterName)
routeTable, existsRouteTable, err := az.getRouteTable() routeTable, existsRouteTable, err := az.getRouteTable(cacheReadTypeDefault)
routes, err := processRoutes(routeTable, existsRouteTable, err) routes, err := processRoutes(routeTable, existsRouteTable, err)
if err != nil { if err != nil {
return nil, err return nil, err
@ -102,7 +102,7 @@ func processRoutes(routeTable network.RouteTable, exists bool, err error) ([]*cl
} }
func (az *Cloud) createRouteTableIfNotExists(clusterName string, kubeRoute *cloudprovider.Route) error { func (az *Cloud) createRouteTableIfNotExists(clusterName string, kubeRoute *cloudprovider.Route) error {
if _, existsRouteTable, err := az.getRouteTable(); err != nil { if _, existsRouteTable, err := az.getRouteTable(cacheReadTypeDefault); err != nil {
klog.V(2).Infof("createRouteTableIfNotExists error: couldn't get routetable. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) klog.V(2).Infof("createRouteTableIfNotExists error: couldn't get routetable. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
return err return err
} else if existsRouteTable { } else if existsRouteTable {


@ -375,14 +375,14 @@ func (as *availabilitySet) GetInstanceIDByNodeName(name string) (string, error)
var machine compute.VirtualMachine var machine compute.VirtualMachine
var err error var err error
machine, err = as.getVirtualMachine(types.NodeName(name)) machine, err = as.getVirtualMachine(types.NodeName(name), cacheReadTypeUnsafe)
if err == cloudprovider.InstanceNotFound { if err == cloudprovider.InstanceNotFound {
return "", cloudprovider.InstanceNotFound return "", cloudprovider.InstanceNotFound
} }
if err != nil { if err != nil {
if as.CloudProviderBackoff { if as.CloudProviderBackoff {
klog.V(2).Infof("GetInstanceIDByNodeName(%s) backing off", name) klog.V(2).Infof("GetInstanceIDByNodeName(%s) backing off", name)
machine, err = as.GetVirtualMachineWithRetry(types.NodeName(name)) machine, err = as.GetVirtualMachineWithRetry(types.NodeName(name), cacheReadTypeUnsafe)
if err != nil { if err != nil {
klog.V(2).Infof("GetInstanceIDByNodeName(%s) abort backoff", name) klog.V(2).Infof("GetInstanceIDByNodeName(%s) abort backoff", name)
return "", err return "", err
@ -403,7 +403,7 @@ func (as *availabilitySet) GetInstanceIDByNodeName(name string) (string, error)
// GetPowerStatusByNodeName returns the power state of the specified node. // GetPowerStatusByNodeName returns the power state of the specified node.
func (as *availabilitySet) GetPowerStatusByNodeName(name string) (powerState string, err error) { func (as *availabilitySet) GetPowerStatusByNodeName(name string) (powerState string, err error) {
vm, err := as.getVirtualMachine(types.NodeName(name)) vm, err := as.getVirtualMachine(types.NodeName(name), cacheReadTypeDefault)
if err != nil { if err != nil {
return powerState, err return powerState, err
} }
@ -436,7 +436,7 @@ func (as *availabilitySet) GetNodeNameByProviderID(providerID string) (types.Nod
// GetInstanceTypeByNodeName gets the instance type by node name. // GetInstanceTypeByNodeName gets the instance type by node name.
func (as *availabilitySet) GetInstanceTypeByNodeName(name string) (string, error) { func (as *availabilitySet) GetInstanceTypeByNodeName(name string) (string, error) {
machine, err := as.getVirtualMachine(types.NodeName(name)) machine, err := as.getVirtualMachine(types.NodeName(name), cacheReadTypeUnsafe)
if err != nil { if err != nil {
klog.Errorf("as.GetInstanceTypeByNodeName(%s) failed: as.getVirtualMachine(%s) err=%v", name, name, err) klog.Errorf("as.GetInstanceTypeByNodeName(%s) failed: as.getVirtualMachine(%s) err=%v", name, name, err)
return "", err return "", err
@ -448,7 +448,7 @@ func (as *availabilitySet) GetInstanceTypeByNodeName(name string) (string, error
// GetZoneByNodeName gets availability zone for the specified node. If the node is not running // GetZoneByNodeName gets availability zone for the specified node. If the node is not running
// with availability zone, then it returns fault domain. // with availability zone, then it returns fault domain.
func (as *availabilitySet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) { func (as *availabilitySet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
vm, err := as.getVirtualMachine(types.NodeName(name)) vm, err := as.getVirtualMachine(types.NodeName(name), cacheReadTypeUnsafe)
if err != nil { if err != nil {
return cloudprovider.Zone{}, err return cloudprovider.Zone{}, err
} }
@ -649,7 +649,7 @@ func extractResourceGroupByNicID(nicID string) (string, error) {
func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName string) (network.Interface, error) { func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName string) (network.Interface, error) {
var machine compute.VirtualMachine var machine compute.VirtualMachine
machine, err := as.GetVirtualMachineWithRetry(types.NodeName(nodeName)) machine, err := as.GetVirtualMachineWithRetry(types.NodeName(nodeName), cacheReadTypeDefault)
if err != nil { if err != nil {
klog.V(2).Infof("GetPrimaryInterface(%s, %s) abort backoff", nodeName, vmSetName) klog.V(2).Infof("GetPrimaryInterface(%s, %s) abort backoff", nodeName, vmSetName)
return network.Interface{}, err return network.Interface{}, err


@ -70,7 +70,7 @@ type VMSet interface {
// DetachDisk detaches a vhd from host. The vhd can be identified by diskName or diskURI. // DetachDisk detaches a vhd from host. The vhd can be identified by diskName or diskURI.
DetachDisk(diskName, diskURI string, nodeName types.NodeName) (*http.Response, error) DetachDisk(diskName, diskURI string, nodeName types.NodeName) (*http.Response, error)
// GetDataDisks gets a list of data disks attached to the node. // GetDataDisks gets a list of data disks attached to the node.
GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) GetDataDisks(nodeName types.NodeName, crt cacheReadType) ([]compute.DataDisk, error)
// GetPowerStatusByNodeName returns the power state of the specified node. // GetPowerStatusByNodeName returns the power state of the specified node.
GetPowerStatusByNodeName(name string) (string, error) GetPowerStatusByNodeName(name string) (string, error)


@ -88,9 +88,9 @@ func newScaleSet(az *Cloud) (VMSet, error) {
// getVmssVM gets virtualMachineScaleSetVM by nodeName from cache. // getVmssVM gets virtualMachineScaleSetVM by nodeName from cache.
// It returns cloudprovider.InstanceNotFound if node does not belong to any scale sets. // It returns cloudprovider.InstanceNotFound if node does not belong to any scale sets.
func (ss *scaleSet) getVmssVM(nodeName string) (string, string, *compute.VirtualMachineScaleSetVM, error) { func (ss *scaleSet) getVmssVM(nodeName string, crt cacheReadType) (string, string, *compute.VirtualMachineScaleSetVM, error) {
getter := func(nodeName string) (string, string, *compute.VirtualMachineScaleSetVM, error) { getter := func(nodeName string) (string, string, *compute.VirtualMachineScaleSetVM, error) {
cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey) cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey, crt)
if err != nil { if err != nil {
return "", "", nil, err return "", "", nil, err
} }
@ -132,7 +132,7 @@ func (ss *scaleSet) getVmssVM(nodeName string) (string, string, *compute.Virtual
// GetPowerStatusByNodeName returns the power state of the specified node. // GetPowerStatusByNodeName returns the power state of the specified node.
func (ss *scaleSet) GetPowerStatusByNodeName(name string) (powerState string, err error) { func (ss *scaleSet) GetPowerStatusByNodeName(name string) (powerState string, err error) {
_, _, vm, err := ss.getVmssVM(name) _, _, vm, err := ss.getVmssVM(name, cacheReadTypeDefault)
if err != nil { if err != nil {
return powerState, err return powerState, err
} }
@ -154,9 +154,9 @@ func (ss *scaleSet) GetPowerStatusByNodeName(name string) (powerState string, er
// getCachedVirtualMachineByInstanceID gets scaleSetVMInfo from cache. // getCachedVirtualMachineByInstanceID gets scaleSetVMInfo from cache.
// The node must belong to one of scale sets. // The node must belong to one of scale sets.
func (ss *scaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID string) (*compute.VirtualMachineScaleSetVM, error) { func (ss *scaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID string, crt cacheReadType) (*compute.VirtualMachineScaleSetVM, error) {
getter := func() (vm *compute.VirtualMachineScaleSetVM, found bool, err error) { getter := func() (vm *compute.VirtualMachineScaleSetVM, found bool, err error) {
cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey) cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey, crt)
if err != nil { if err != nil {
return nil, false, err return nil, false, err
} }
@ -203,7 +203,7 @@ func (ss *scaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceI
// It must return ("", cloudprovider.InstanceNotFound) if the instance does // It must return ("", cloudprovider.InstanceNotFound) if the instance does
// not exist or is no longer running. // not exist or is no longer running.
func (ss *scaleSet) GetInstanceIDByNodeName(name string) (string, error) { func (ss *scaleSet) GetInstanceIDByNodeName(name string) (string, error) {
managedByAS, err := ss.isNodeManagedByAvailabilitySet(name) managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, cacheReadTypeUnsafe)
if err != nil { if err != nil {
klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err) klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
return "", err return "", err
@ -213,7 +213,7 @@ func (ss *scaleSet) GetInstanceIDByNodeName(name string) (string, error) {
return ss.availabilitySet.GetInstanceIDByNodeName(name) return ss.availabilitySet.GetInstanceIDByNodeName(name)
} }
_, _, vm, err := ss.getVmssVM(name) _, _, vm, err := ss.getVmssVM(name, cacheReadTypeUnsafe)
if err != nil { if err != nil {
return "", err return "", err
} }
@ -247,7 +247,7 @@ func (ss *scaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName,
return ss.availabilitySet.GetNodeNameByProviderID(providerID) return ss.availabilitySet.GetNodeNameByProviderID(providerID)
} }
vm, err := ss.getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID) vm, err := ss.getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID, cacheReadTypeUnsafe)
if err != nil { if err != nil {
return "", err return "", err
} }
@ -262,7 +262,7 @@ func (ss *scaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName,
// GetInstanceTypeByNodeName gets the instance type by node name. // GetInstanceTypeByNodeName gets the instance type by node name.
func (ss *scaleSet) GetInstanceTypeByNodeName(name string) (string, error) { func (ss *scaleSet) GetInstanceTypeByNodeName(name string) (string, error) {
managedByAS, err := ss.isNodeManagedByAvailabilitySet(name) managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, cacheReadTypeUnsafe)
if err != nil { if err != nil {
klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err) klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
return "", err return "", err
@ -272,7 +272,7 @@ func (ss *scaleSet) GetInstanceTypeByNodeName(name string) (string, error) {
return ss.availabilitySet.GetInstanceTypeByNodeName(name) return ss.availabilitySet.GetInstanceTypeByNodeName(name)
} }
_, _, vm, err := ss.getVmssVM(name) _, _, vm, err := ss.getVmssVM(name, cacheReadTypeUnsafe)
if err != nil { if err != nil {
return "", err return "", err
} }
@ -287,7 +287,7 @@ func (ss *scaleSet) GetInstanceTypeByNodeName(name string) (string, error) {
// GetZoneByNodeName gets availability zone for the specified node. If the node is not running // GetZoneByNodeName gets availability zone for the specified node. If the node is not running
// with availability zone, then it returns fault domain. // with availability zone, then it returns fault domain.
func (ss *scaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) { func (ss *scaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
managedByAS, err := ss.isNodeManagedByAvailabilitySet(name) managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, cacheReadTypeUnsafe)
if err != nil { if err != nil {
klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err) klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
return cloudprovider.Zone{}, err return cloudprovider.Zone{}, err
@ -297,7 +297,7 @@ func (ss *scaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
return ss.availabilitySet.GetZoneByNodeName(name) return ss.availabilitySet.GetZoneByNodeName(name)
} }
_, _, vm, err := ss.getVmssVM(name) _, _, vm, err := ss.getVmssVM(name, cacheReadTypeUnsafe)
if err != nil { if err != nil {
return cloudprovider.Zone{}, err return cloudprovider.Zone{}, err
} }
@ -315,6 +315,11 @@ func (ss *scaleSet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
} else if vm.InstanceView != nil && vm.InstanceView.PlatformFaultDomain != nil { } else if vm.InstanceView != nil && vm.InstanceView.PlatformFaultDomain != nil {
// Availability zone is not used for the node, falling back to fault domain. // Availability zone is not used for the node, falling back to fault domain.
failureDomain = strconv.Itoa(int(*vm.InstanceView.PlatformFaultDomain)) failureDomain = strconv.Itoa(int(*vm.InstanceView.PlatformFaultDomain))
} else {
err = fmt.Errorf("failed to get zone info")
klog.Errorf("GetZoneByNodeName: got unexpected error %v", err)
ss.deleteCacheForNode(name)
return cloudprovider.Zone{}, err
} }
return cloudprovider.Zone{ return cloudprovider.Zone{
@ -531,7 +536,7 @@ func (ss *scaleSet) getAgentPoolScaleSets(nodes []*v1.Node) (*[]string, error) {
} }
nodeName := nodes[nx].Name nodeName := nodes[nx].Name
ssName, _, _, err := ss.getVmssVM(nodeName) ssName, _, _, err := ss.getVmssVM(nodeName, cacheReadTypeDefault)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -609,7 +614,7 @@ func extractResourceGroupByVMSSNicID(nicID string) (string, error) {
// GetPrimaryInterface gets machine primary network interface by node name and vmSet. // GetPrimaryInterface gets machine primary network interface by node name and vmSet.
func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, error) { func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, error) {
managedByAS, err := ss.isNodeManagedByAvailabilitySet(nodeName) managedByAS, err := ss.isNodeManagedByAvailabilitySet(nodeName, cacheReadTypeDefault)
if err != nil { if err != nil {
klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err) klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
return network.Interface{}, err return network.Interface{}, err
@ -619,7 +624,7 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, err
return ss.availabilitySet.GetPrimaryInterface(nodeName) return ss.availabilitySet.GetPrimaryInterface(nodeName)
} }
ssName, instanceID, vm, err := ss.getVmssVM(nodeName) ssName, instanceID, vm, err := ss.getVmssVM(nodeName, cacheReadTypeDefault)
if err != nil { if err != nil {
// VM is availability set, but not cached yet in availabilitySetNodesCache. // VM is availability set, but not cached yet in availabilitySetNodesCache.
if err == ErrorNotVmssInstance { if err == ErrorNotVmssInstance {
@ -742,7 +747,7 @@ func (ss *scaleSet) getConfigForScaleSetByIPFamily(config *compute.VirtualMachin
func (ss *scaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetName string, isInternal bool) error { func (ss *scaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetName string, isInternal bool) error {
klog.V(3).Infof("ensuring node %q of scaleset %q in LB backendpool %q", nodeName, vmSetName, backendPoolID) klog.V(3).Infof("ensuring node %q of scaleset %q in LB backendpool %q", nodeName, vmSetName, backendPoolID)
vmName := mapNodeNameToVMName(nodeName) vmName := mapNodeNameToVMName(nodeName)
ssName, instanceID, vm, err := ss.getVmssVM(vmName) ssName, instanceID, vm, err := ss.getVmssVM(vmName, cacheReadTypeDefault)
if err != nil { if err != nil {
return err return err
} }
@ -1022,7 +1027,7 @@ func (ss *scaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac
f := func() error { f := func() error {
// Check whether the node is VMAS virtual machine. // Check whether the node is VMAS virtual machine.
managedByAS, err := ss.isNodeManagedByAvailabilitySet(localNodeName) managedByAS, err := ss.isNodeManagedByAvailabilitySet(localNodeName, cacheReadTypeDefault)
if err != nil { if err != nil {
klog.Errorf("Failed to check isNodeManagedByAvailabilitySet(%s): %v", localNodeName, err) klog.Errorf("Failed to check isNodeManagedByAvailabilitySet(%s): %v", localNodeName, err)
return err return err
@ -1063,7 +1068,7 @@ func (ss *scaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac
// ensureBackendPoolDeletedFromNode ensures the loadBalancer backendAddressPools deleted from the specified node. // ensureBackendPoolDeletedFromNode ensures the loadBalancer backendAddressPools deleted from the specified node.
func (ss *scaleSet) ensureBackendPoolDeletedFromNode(service *v1.Service, nodeName, backendPoolID string) error { func (ss *scaleSet) ensureBackendPoolDeletedFromNode(service *v1.Service, nodeName, backendPoolID string) error {
ssName, instanceID, vm, err := ss.getVmssVM(nodeName) ssName, instanceID, vm, err := ss.getVmssVM(nodeName, cacheReadTypeDefault)
if err != nil { if err != nil {
return err return err
} }
@ -1162,7 +1167,7 @@ func (ss *scaleSet) getNodeNameByIPConfigurationID(ipConfigurationID string) (st
resourceGroup := matches[1] resourceGroup := matches[1]
scaleSetName := matches[2] scaleSetName := matches[2]
instanceID := matches[3] instanceID := matches[3]
vm, err := ss.getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID) vm, err := ss.getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID, cacheReadTypeUnsafe)
if err != nil { if err != nil {
return "", err return "", err
} }
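The new else branch in GetZoneByNodeName pairs the error with deleteCacheForNode, so a VM whose instance view lacked both zone and fault-domain data is refetched on the next lookup rather than being served from a poisoned cache entry. The invalidate-on-unexpected-state pattern in miniature (lookup and invalidate are hypothetical stand-ins for getVmssVM and deleteCacheForNode):

package sketch

import "fmt"

// zoneOrInvalidate drops the cached entry when it is missing required
// fields, so the next call refetches a complete record.
func zoneOrInvalidate(name string, lookup func(string) (string, bool), invalidate func(string)) (string, error) {
	zone, ok := lookup(name)
	if !ok {
		invalidate(name) // purge the incomplete entry
		return "", fmt.Errorf("failed to get zone info for %s", name)
	}
	return zone, nil
}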


@ -47,6 +47,7 @@ type vmssVirtualMachinesEntry struct {
vmssName string vmssName string
instanceID string instanceID string
virtualMachine *compute.VirtualMachineScaleSetVM virtualMachine *compute.VirtualMachineScaleSetVM
lastUpdate time.Time
} }
func (ss *scaleSet) makeVmssVMName(scaleSetName, instanceID string) string { func (ss *scaleSet) makeVmssVMName(scaleSetName, instanceID string) string {
@ -101,6 +102,7 @@ func (ss *scaleSet) newVMSSVirtualMachinesCache() (*timedCache, error) {
vmssName: ssName, vmssName: ssName,
instanceID: to.String(vm.InstanceID), instanceID: to.String(vm.InstanceID),
virtualMachine: &vm, virtualMachine: &vm,
lastUpdate: time.Now().UTC(),
}) })
} }
} }
@ -113,7 +115,7 @@ func (ss *scaleSet) newVMSSVirtualMachinesCache() (*timedCache, error) {
} }
func (ss *scaleSet) deleteCacheForNode(nodeName string) error { func (ss *scaleSet) deleteCacheForNode(nodeName string) error {
cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey) cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey, cacheReadTypeUnsafe)
if err != nil { if err != nil {
return err return err
} }
@ -150,8 +152,8 @@ func (ss *scaleSet) newAvailabilitySetNodesCache() (*timedCache, error) {
return newTimedcache(availabilitySetNodesCacheTTL, getter) return newTimedcache(availabilitySetNodesCacheTTL, getter)
} }
func (ss *scaleSet) isNodeManagedByAvailabilitySet(nodeName string) (bool, error) { func (ss *scaleSet) isNodeManagedByAvailabilitySet(nodeName string, crt cacheReadType) (bool, error) {
cached, err := ss.availabilitySetNodesCache.Get(availabilitySetNodesKey) cached, err := ss.availabilitySetNodesCache.Get(availabilitySetNodesKey, crt)
if err != nil { if err != nil {
return false, err return false, err
} }


@ -90,9 +90,9 @@ func ignoreStatusForbiddenFromError(err error) error {
/// getVirtualMachine calls 'VirtualMachinesClient.Get' with a timed cache /// getVirtualMachine calls 'VirtualMachinesClient.Get' with a timed cache
/// The service side has throttling control that delays responses if there're multiple requests onto certain vm /// The service side has throttling control that delays responses if there're multiple requests onto certain vm
/// resource request in short period. /// resource request in short period.
func (az *Cloud) getVirtualMachine(nodeName types.NodeName) (vm compute.VirtualMachine, err error) { func (az *Cloud) getVirtualMachine(nodeName types.NodeName, crt cacheReadType) (vm compute.VirtualMachine, err error) {
vmName := string(nodeName) vmName := string(nodeName)
cachedVM, err := az.vmCache.Get(vmName) cachedVM, err := az.vmCache.Get(vmName, crt)
if err != nil { if err != nil {
return vm, err return vm, err
} }
@ -104,8 +104,8 @@ func (az *Cloud) getVirtualMachine(nodeName types.NodeName) (vm compute.VirtualM
return *(cachedVM.(*compute.VirtualMachine)), nil return *(cachedVM.(*compute.VirtualMachine)), nil
} }
func (az *Cloud) getRouteTable() (routeTable network.RouteTable, exists bool, err error) { func (az *Cloud) getRouteTable(crt cacheReadType) (routeTable network.RouteTable, exists bool, err error) {
cachedRt, err := az.rtCache.Get(az.RouteTableName) cachedRt, err := az.rtCache.Get(az.RouteTableName, crt)
if err != nil { if err != nil {
return routeTable, false, err return routeTable, false, err
} }
@ -168,8 +168,8 @@ func (az *Cloud) getSubnet(virtualNetworkName string, subnetName string) (subnet
return subnet, exists, err return subnet, exists, err
} }
func (az *Cloud) getAzureLoadBalancer(name string) (lb network.LoadBalancer, exists bool, err error) { func (az *Cloud) getAzureLoadBalancer(name string, crt cacheReadType) (lb network.LoadBalancer, exists bool, err error) {
cachedLB, err := az.lbCache.Get(name) cachedLB, err := az.lbCache.Get(name, crt)
if err != nil { if err != nil {
return lb, false, err return lb, false, err
} }
@ -181,12 +181,12 @@ func (az *Cloud) getAzureLoadBalancer(name string) (lb network.LoadBalancer, exi
return *(cachedLB.(*network.LoadBalancer)), true, nil return *(cachedLB.(*network.LoadBalancer)), true, nil
} }
func (az *Cloud) getSecurityGroup() (nsg network.SecurityGroup, err error) { func (az *Cloud) getSecurityGroup(crt cacheReadType) (nsg network.SecurityGroup, err error) {
if az.SecurityGroupName == "" { if az.SecurityGroupName == "" {
return nsg, fmt.Errorf("securityGroupName is not configured") return nsg, fmt.Errorf("securityGroupName is not configured")
} }
securityGroup, err := az.nsgCache.Get(az.SecurityGroupName) securityGroup, err := az.nsgCache.Get(az.SecurityGroupName, crt)
if err != nil { if err != nil {
return nsg, err return nsg, err
} }


@ -53,7 +53,7 @@ func (az *Cloud) GetZoneID(zoneLabel string) string {
// If the node is not running with availability zones, then it will fall back to fault domain. // If the node is not running with availability zones, then it will fall back to fault domain.
func (az *Cloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) { func (az *Cloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) {
if az.UseInstanceMetadata { if az.UseInstanceMetadata {
metadata, err := az.metadata.GetMetadata() metadata, err := az.metadata.GetMetadata(cacheReadTypeUnsafe)
if err != nil { if err != nil {
return cloudprovider.Zone{}, err return cloudprovider.Zone{}, err
} }

vendor/modules.txt

@ -340,7 +340,7 @@ github.com/containernetworking/cni/pkg/invoke
github.com/containernetworking/cni/pkg/version github.com/containernetworking/cni/pkg/version
# github.com/containernetworking/plugins v0.8.2 # github.com/containernetworking/plugins v0.8.2
github.com/containernetworking/plugins/pkg/ns github.com/containernetworking/plugins/pkg/ns
# github.com/coreos/etcd v3.3.15+incompatible # github.com/coreos/etcd v3.3.17+incompatible
github.com/coreos/etcd/clientv3 github.com/coreos/etcd/clientv3
github.com/coreos/etcd/pkg/transport github.com/coreos/etcd/pkg/transport
github.com/coreos/etcd/auth/authpb github.com/coreos/etcd/auth/authpb
@ -1116,7 +1116,7 @@ gopkg.in/square/go-jose.v2/json
gopkg.in/warnings.v0 gopkg.in/warnings.v0
# gopkg.in/yaml.v2 v2.2.4 # gopkg.in/yaml.v2 v2.2.4
gopkg.in/yaml.v2 gopkg.in/yaml.v2
# k8s.io/api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.16.2-k3s.1 # k8s.io/api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.16.3-k3s.1
k8s.io/api/core/v1 k8s.io/api/core/v1
k8s.io/api/extensions/v1beta1 k8s.io/api/extensions/v1beta1
k8s.io/api/networking/v1 k8s.io/api/networking/v1
@ -1158,7 +1158,7 @@ k8s.io/api/authorization/v1beta1
k8s.io/api/admission/v1 k8s.io/api/admission/v1
k8s.io/api/admission/v1beta1 k8s.io/api/admission/v1beta1
k8s.io/api/imagepolicy/v1alpha1 k8s.io/api/imagepolicy/v1alpha1
# k8s.io/apiextensions-apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.16.2-k3s.1 # k8s.io/apiextensions-apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.16.3-k3s.1
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1
k8s.io/apiextensions-apiserver/pkg/apiserver k8s.io/apiextensions-apiserver/pkg/apiserver
@ -1206,7 +1206,7 @@ k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensio
k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1 k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1
k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1
k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1 k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1
# k8s.io/apimachinery v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.16.2-k3s.1 # k8s.io/apimachinery v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.16.3-k3s.1
k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/equality
k8s.io/apimachinery/pkg/apis/meta/v1 k8s.io/apimachinery/pkg/apis/meta/v1
k8s.io/apimachinery/pkg/util/json k8s.io/apimachinery/pkg/util/json
@@ -1268,7 +1268,7 @@ k8s.io/apimachinery/pkg/api/meta/table
 k8s.io/apimachinery/pkg/apis/meta/v1beta1/validation
 k8s.io/apimachinery/pkg/runtime/serializer/yaml
 k8s.io/apimachinery/pkg/util/duration
-# k8s.io/apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.16.2-k3s.1
+# k8s.io/apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.16.3-k3s.1
 k8s.io/apiserver/pkg/authentication/authenticator
 k8s.io/apiserver/pkg/endpoints/request
 k8s.io/apiserver/pkg/server
@@ -1381,7 +1381,7 @@ k8s.io/apiserver/pkg/registry/generic/rest
 k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1
 k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission
 k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1
-# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.16.2-k3s.1
+# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.16.3-k3s.1
 k8s.io/cli-runtime/pkg/genericclioptions
 k8s.io/cli-runtime/pkg/printers
 k8s.io/cli-runtime/pkg/resource
@@ -1394,7 +1394,7 @@ k8s.io/cli-runtime/pkg/kustomize/k8sdeps/configmapandsecret
 k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/hash
 k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch
 k8s.io/cli-runtime/pkg/kustomize/k8sdeps/kv
-# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.16.2-k3s.1
+# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.16.3-k3s.1
 k8s.io/client-go/kubernetes
 k8s.io/client-go/kubernetes/typed/core/v1
 k8s.io/client-go/tools/clientcmd
@@ -1493,6 +1493,7 @@ k8s.io/client-go/metadata
 k8s.io/client-go/metadata/metadatainformer
 k8s.io/client-go/restmapper
 k8s.io/client-go/scale
+k8s.io/client-go/tools/events
 k8s.io/client-go/util/workqueue
 k8s.io/client-go/informers/apps/v1
 k8s.io/client-go/listers/apps/v1
@@ -1553,7 +1554,6 @@ k8s.io/client-go/listers/batch/v1
 k8s.io/client-go/listers/coordination/v1beta1
 k8s.io/client-go/listers/autoscaling/v1
 k8s.io/client-go/listers/storage/v1
-k8s.io/client-go/tools/events
 k8s.io/client-go/discovery/cached/disk
 k8s.io/client-go/util/jsonpath
 k8s.io/client-go/listers/admissionregistration/v1
@@ -1580,20 +1580,20 @@ k8s.io/client-go/listers/scheduling/v1beta1
 k8s.io/client-go/listers/settings/v1alpha1
 k8s.io/client-go/listers/storage/v1alpha1
 k8s.io/client-go/third_party/forked/golang/template
-# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.16.2-k3s.1
+# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.16.3-k3s.1
 k8s.io/cloud-provider
 k8s.io/cloud-provider/volume/helpers
 k8s.io/cloud-provider/volume
 k8s.io/cloud-provider/service/helpers
 k8s.io/cloud-provider/volume/errors
 k8s.io/cloud-provider/node/helpers
-# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.16.2-k3s.1
+# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.16.3-k3s.1
 k8s.io/cluster-bootstrap/token/api
 k8s.io/cluster-bootstrap/util/secrets
 k8s.io/cluster-bootstrap/util/tokens
 k8s.io/cluster-bootstrap/token/jws
 k8s.io/cluster-bootstrap/token/util
-# k8s.io/code-generator v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.16.2-k3s.1
+# k8s.io/code-generator v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.16.3-k3s.1
 k8s.io/code-generator/cmd/client-gen/args
 k8s.io/code-generator/cmd/client-gen/generators
 k8s.io/code-generator/cmd/client-gen/types
@@ -1608,7 +1608,7 @@ k8s.io/code-generator/pkg/util
 k8s.io/code-generator/cmd/client-gen/generators/fake
 k8s.io/code-generator/cmd/client-gen/generators/scheme
 k8s.io/code-generator/pkg/namer
-# k8s.io/component-base v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.16.2-k3s.1
+# k8s.io/component-base v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.16.3-k3s.1
 k8s.io/component-base/logs
 k8s.io/component-base/cli/flag
 k8s.io/component-base/metrics/prometheus/restclient
@@ -1621,10 +1621,10 @@ k8s.io/component-base/metrics
 k8s.io/component-base/featuregate
 k8s.io/component-base/config/v1alpha1
 k8s.io/component-base/config/validation
-# k8s.io/cri-api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.16.2-k3s.1
+# k8s.io/cri-api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.16.3-k3s.1
 k8s.io/cri-api/pkg/apis/runtime/v1alpha2
 k8s.io/cri-api/pkg/apis
-# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.16.2-k3s.1
+# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.16.3-k3s.1
 k8s.io/csi-translation-lib/plugins
 k8s.io/csi-translation-lib
 # k8s.io/gengo v0.0.0-20190822140433-26a664648505
@@ -1639,7 +1639,7 @@ k8s.io/gengo/examples/set-gen/sets
 k8s.io/heapster/metrics/api/v1/types
 # k8s.io/klog v0.4.0
 k8s.io/klog
-# k8s.io/kube-aggregator v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.16.2-k3s.1
+# k8s.io/kube-aggregator v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.16.3-k3s.1
 k8s.io/kube-aggregator/pkg/apis/apiregistration/v1
 k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/helper
 k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1
@@ -1667,7 +1667,7 @@ k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration/v1b
 k8s.io/kube-aggregator/pkg/registry/apiservice
 k8s.io/kube-aggregator/pkg/client/listers/apiregistration/v1beta1
 k8s.io/kube-aggregator/pkg/apis/apiregistration/validation
-# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.16.2-k3s.1
+# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.16.3-k3s.1
 k8s.io/kube-controller-manager/config/v1alpha1
 # k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf
 k8s.io/kube-openapi/pkg/builder
@@ -1678,11 +1678,11 @@ k8s.io/kube-openapi/pkg/util/proto
 k8s.io/kube-openapi/pkg/aggregator
 k8s.io/kube-openapi/pkg/util/proto/validation
 k8s.io/kube-openapi/pkg/schemaconv
-# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.16.2-k3s.1
+# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.16.3-k3s.1
 k8s.io/kube-proxy/config/v1alpha1
-# k8s.io/kube-scheduler v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.16.2-k3s.1
+# k8s.io/kube-scheduler v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.16.3-k3s.1
 k8s.io/kube-scheduler/config/v1alpha1
-# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.16.2-k3s.1
+# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.16.3-k3s.1
 k8s.io/kubectl/pkg/util/term
 k8s.io/kubectl/pkg/cmd
 k8s.io/kubectl/pkg/cmd/annotate
@@ -1759,9 +1759,9 @@ k8s.io/kubectl/pkg/util/fieldpath
 k8s.io/kubectl/pkg/util/qos
 k8s.io/kubectl/pkg/util/resource
 k8s.io/kubectl/pkg/util/storage
-# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.16.2-k3s.1
+# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.16.3-k3s.1
 k8s.io/kubelet/config/v1beta1
-# k8s.io/kubernetes v1.16.0 => github.com/rancher/kubernetes v1.16.2-k3s.1
+# k8s.io/kubernetes v1.16.0 => github.com/rancher/kubernetes v1.16.3-k3s.1
 k8s.io/kubernetes/cmd/hyperkube
 k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1
 k8s.io/kubernetes/pkg/kubelet/util
@@ -2477,7 +2477,7 @@ k8s.io/kubernetes/pkg/controller/volume/persistentvolume/config/v1alpha1
 k8s.io/kubernetes/pkg/apis/abac/v1beta1
 k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear
 k8s.io/kubernetes/pkg/util/maps
-# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.16.2-k3s.1
+# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.16.3-k3s.1
 k8s.io/legacy-cloud-providers/aws
 k8s.io/legacy-cloud-providers/azure
 k8s.io/legacy-cloud-providers/gce
@@ -2487,7 +2487,7 @@ k8s.io/legacy-cloud-providers/vsphere/vclib
 k8s.io/legacy-cloud-providers/azure/auth
 k8s.io/legacy-cloud-providers/openstack/util/mount
 k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers
-# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.16.2-k3s.1
+# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.16.3-k3s.1
 k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1
 k8s.io/metrics/pkg/client/custom_metrics
 k8s.io/metrics/pkg/client/external_metrics