Update to v1.18.6-k3s1 (#2035)

Signed-off-by: Brad Davidson <brad.davidson@rancher.com>
Brad Davidson authored 2020-07-17 16:14:37 -07:00, committed by GitHub
parent 186c4a1c6b
commit 206accbe8d
GPG Key ID: 4AEE18F83AFDEB23
43 changed files with 466 additions and 240 deletions

go.mod (50 changed lines)

@ -33,31 +33,31 @@ replace (
github.com/prometheus/client_model => github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910
github.com/prometheus/common => github.com/prometheus/common v0.0.0-20181126121408-4724e9255275
github.com/prometheus/procfs => github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a
k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.4-k3s1
k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.4-k3s1
k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.4-k3s1
k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.4-k3s1
k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.4-k3s1
k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.4-k3s1
k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.4-k3s1
k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.4-k3s1
k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.4-k3s1
k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.4-k3s1
k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.4-k3s1
k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.4-k3s1
k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.4-k3s1
k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.4-k3s1
k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.4-k3s1
k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.4-k3s1
k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.4-k3s1
k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.4-k3s1
k8s.io/kubernetes => github.com/rancher/kubernetes v1.18.4-k3s1
k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.4-k3s1
k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.4-k3s1
k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.18.4-k3s1
k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.4-k3s1
k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.18.4-k3s1
k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.18.4-k3s1
k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.6-k3s1
k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.6-k3s1
k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.6-k3s1
k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.6-k3s1
k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.6-k3s1
k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.6-k3s1
k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.6-k3s1
k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.6-k3s1
k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.6-k3s1
k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.6-k3s1
k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.6-k3s1
k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.6-k3s1
k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.6-k3s1
k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.6-k3s1
k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.6-k3s1
k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.6-k3s1
k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.6-k3s1
k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.6-k3s1
k8s.io/kubernetes => github.com/rancher/kubernetes v1.18.6-k3s1
k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.6-k3s1
k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.6-k3s1
k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.18.6-k3s1
k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.6-k3s1
k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.18.6-k3s1
k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.18.6-k3s1
mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34
)
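The replace block above pins every k8s.io staging module to rancher's Kubernetes fork at the matching v1.18.6-k3s1 tag. As a rough way to confirm which replacements actually ended up in a built binary, a small Go program can read the module info embedded at build time; this is an illustrative sketch, not part of the commit.

```go
package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	info, ok := debug.ReadBuildInfo()
	if !ok {
		fmt.Println("no module build info embedded in this binary")
		return
	}
	// Print every dependency that was satisfied through a replace directive, e.g.
	// k8s.io/client-go v1.18.6-k3s1 => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.6-k3s1
	for _, dep := range info.Deps {
		if dep.Replace != nil {
			fmt.Printf("%s %s => %s %s\n", dep.Path, dep.Version, dep.Replace.Path, dep.Replace.Version)
		}
	}
}
```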

go.sum (86 changed lines)

@ -620,49 +620,49 @@ github.com/rancher/helm-controller v0.6.3 h1:tsqSXndQ7Ms8S0mBd2zWOcTV1tiZ4BUYkem
github.com/rancher/helm-controller v0.6.3/go.mod h1:ZylsxIMGNADRPRNW+NiBWhrwwks9vnKLQiCHYWb6Bi0=
github.com/rancher/kine v0.4.0 h1:1IhWy3TzjExG8xnj46eyUEWdzqNAD1WrgL4eEBKm6Uc=
github.com/rancher/kine v0.4.0/go.mod h1:IImtCJ68AIkE+VY/kUI0NkyJL5q5WzO8QvMsSXqbrpA=
github.com/rancher/kubernetes v1.18.4-k3s1 h1:eTwiRm2Gu8AUDgrda5FK8atZvkh6/3ZDoRFsxmnLvLc=
github.com/rancher/kubernetes v1.18.4-k3s1/go.mod h1:Efg82S+Ti02A/Mww53bxroc7IgzX2bgPsf6hT8gAs3M=
github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.4-k3s1 h1:R6Iye4M4FL/qspOzcJvNPJQNnk9n7nu94FBX+jyqcIA=
github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.4-k3s1/go.mod h1:oMzWB6/RPBLYAObltLVSu5Ms1ZztBe7G8s1ni2rZY7w=
github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.4-k3s1 h1:JYqgiFoq95sytgWi1hteMyKvpCZVi5HCXHMFW6sad7Y=
github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.4-k3s1/go.mod h1:BVIYewlEVCukQBRrZR3Kms8GdCsDQBsRIBCoy3rwzMk=
github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.4-k3s1 h1:UyAGvDwMVjn02+CwO5Z70vCmvDpLvmUem7ReuvoImIw=
github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.4-k3s1/go.mod h1:0LbhSvBf6oDO/G0IsPYTC3eGykX9kRjGqE1+90am7Pg=
github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.4-k3s1 h1:6h8JjWF0cmorfsKyuLdKEOaGd1fpXgu8VUG5Ynu5v2A=
github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.4-k3s1/go.mod h1:wYoVKxMBc/Gtl3o5eEhoIy1iS0Zw8kLYIak9mud65gg=
github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.4-k3s1 h1:BMh4HW6YDId9g4Xja9MXJOXJI+dRyq/mw/0yKRXAIRo=
github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.4-k3s1/go.mod h1:e0a+/gPy7PnNaRJHZz5E3lqfMsiJ17sSfvktHyipb3I=
github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.4-k3s1 h1:Ly7X70k/C2xI4iuC2OZ2ktsRzyuVDVLASsQUVkBMwLM=
github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.4-k3s1/go.mod h1:Ck7kQmlFASfY0SaqYH1NwUrxeuAipkIbnuHi642eQ+I=
github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.4-k3s1 h1:M62wX7hIGwkmRoz6BC4L6d+YXczMxyan29pTfcbDqp8=
github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.4-k3s1/go.mod h1:jW0IWD1v1cNcp/vvXbVuovmZNSieKSZBdM7VmX1lrVI=
github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.4-k3s1 h1:AtFTyVKCuUHN/afuWhMIcf1vGVfZ/Uz2PpQn45aLKpU=
github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.4-k3s1/go.mod h1:oHXhD/NqW/vlYggpTUWbP2x6disww69H1jdsyirbJl8=
github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.4-k3s1 h1:QpQx2AXoU+fASZTpLjGjYT0ChNgc1RKImb8WWsOwK1U=
github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.4-k3s1/go.mod h1:qBtAbyavqI3lGwEvxrQk9wwUTWntOADx38Iizyn31nw=
github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.4-k3s1 h1:aNSdkAueVRL0XfmUrALXF/NAavlPlkm0zDP/FBWJXTs=
github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.4-k3s1/go.mod h1:zRlCznOsLYdwq5DB2b/26X/n/04fhV3U3rMC60t80/Q=
github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.4-k3s1 h1:Q9wIqlITpmv93PApIKG75m8AnPGXrZeWcjqlUYk7Au8=
github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.4-k3s1/go.mod h1:O3AtmT8iqccYwp/fsXdy3h0N9X/yfvRMD2XS48PJrBk=
github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.4-k3s1 h1:EENXY3hRYDPo9fuF9EpccUJTgOE0wT8r3oaitEppi24=
github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.4-k3s1/go.mod h1:/YQL/PqGdoNbC2H+w4tx2zrVdxNb541lW3PA81FdOlE=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.4-k3s1 h1:0h0f2gCJAKw170KoUfPsMi/CgTWHq7tb6G7IUPKun2c=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.4-k3s1/go.mod h1:NcOKzNVVRhmkQmzCcBHfPPcZqgGXouc/o3Eul3saPj8=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.4-k3s1 h1:T1NDBQWf93dCAvYC5aCH0Ik3cPoQC+5MOdhg8b4k2OM=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.4-k3s1/go.mod h1:pABoR/v0r2aJLFC1570FaaRJbXyiHhqdGHe5W8nk0XY=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.4-k3s1 h1:tlsv2LXMIEfSLQ5wvfMZQZvUJFwrhupJu5QFxyklavY=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.4-k3s1/go.mod h1:GLAmLACy/nOND24DRGKyPH21F89pTcevjPRxEtbLJmU=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.4-k3s1 h1:TkkarxQdJPnr0rs/uJsQwyL/Alo79oT8Yiakbtvq8Y4=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.4-k3s1/go.mod h1:UNQ/Ff/Mq9mmCl0MYGl3ciCEIRQr9BT+/DSsoy6/ZMI=
github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.4-k3s1 h1:b0YH7h5M8+k6LYMP6A/laGXpad4mkNmuPJoMAvhomGo=
github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.4-k3s1/go.mod h1:eosbAJR16uuWsgirnmlt31NV+ZwZLQsMNbxiRZYbco8=
github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.4-k3s1 h1:EsUdJepxaZSTOFZK1UrhmHPoHdnpRC8OtQWXne92vcE=
github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.4-k3s1/go.mod h1:Raj75cxSm9NiVBoLk/lB1D4XvpBzTG4WoJ6nIH8Cyew=
github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.4-k3s1 h1:0cbzcnS4yzFz3SyaWokOqSDsECcr/bqxDjcy263Fy54=
github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.4-k3s1/go.mod h1:R6lK1g14jiec20OVuA1ArvsCKs5th4rxGL3eUMdQmyA=
github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.4-k3s1 h1:xHggZMz7a4OpR0dEVz9uy1Iskk3/wzLYoGQcQAdCTtc=
github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.4-k3s1/go.mod h1:xZM9EdJpWjqIWPvLiCP7vYKUEMwIgc0S8nc/MlLVK3Y=
github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.4-k3s1/go.mod h1:p8OmVbdzpawdZ/r9E1qcdJpzRirEg4OcSg8aZVWqvJo=
github.com/rancher/kubernetes v1.18.6-k3s1 h1:UyO6rKHsFg48cG7V19Ji0XPXScNxPJRlPPYWBKMmo6Y=
github.com/rancher/kubernetes v1.18.6-k3s1/go.mod h1:Efg82S+Ti02A/Mww53bxroc7IgzX2bgPsf6hT8gAs3M=
github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.6-k3s1 h1:sIvPhLYuV3aZgYaQYASk2E9R+bB1XVcp/6d02mjgEH8=
github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.6-k3s1/go.mod h1:oMzWB6/RPBLYAObltLVSu5Ms1ZztBe7G8s1ni2rZY7w=
github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.6-k3s1 h1:W104+V2TFDIaV8IPsYQnnw8MnVtvmnl8jNUnAkkLpeI=
github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.6-k3s1/go.mod h1:BVIYewlEVCukQBRrZR3Kms8GdCsDQBsRIBCoy3rwzMk=
github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.6-k3s1 h1:rr7Ogkk2YC89UNr/ZBywG82tuu9PlB2iqqtPS4I04n4=
github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.6-k3s1/go.mod h1:0LbhSvBf6oDO/G0IsPYTC3eGykX9kRjGqE1+90am7Pg=
github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.6-k3s1 h1:Zd1pgZtAZS+lMfE9JRm7niTJx5W/WIxFwyA9ABGIhT8=
github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.6-k3s1/go.mod h1:wYoVKxMBc/Gtl3o5eEhoIy1iS0Zw8kLYIak9mud65gg=
github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.6-k3s1 h1:Uf6GBlxBaeJMevfwRWX8wbyJw5fIioxqIF9RMcfQSU4=
github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.6-k3s1/go.mod h1:e0a+/gPy7PnNaRJHZz5E3lqfMsiJ17sSfvktHyipb3I=
github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.6-k3s1 h1:qRfoeAUJQbBN9/tIf50sq5aHaG+j46201yIv86Dze0g=
github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.6-k3s1/go.mod h1:Ck7kQmlFASfY0SaqYH1NwUrxeuAipkIbnuHi642eQ+I=
github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.6-k3s1 h1:Pox7xyrB5KgaV0oZFwH6Jy0BWI+Wtj04P62awKbvXXE=
github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.6-k3s1/go.mod h1:jW0IWD1v1cNcp/vvXbVuovmZNSieKSZBdM7VmX1lrVI=
github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.6-k3s1 h1:BXGJkd7+sag5QHSlPBzYc8Q5uepBpXEb3cmLlgjQywQ=
github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.6-k3s1/go.mod h1:oHXhD/NqW/vlYggpTUWbP2x6disww69H1jdsyirbJl8=
github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.6-k3s1 h1:JR3PwOSBf055j96E3ADPK9/X5PrI7rHazsKb1AdZ/Gc=
github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.6-k3s1/go.mod h1:qBtAbyavqI3lGwEvxrQk9wwUTWntOADx38Iizyn31nw=
github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.6-k3s1 h1:ItVL3pMrVTnwPkIwEc3Y+5NYZWoz6nLwFrgLeNZGeG0=
github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.6-k3s1/go.mod h1:zRlCznOsLYdwq5DB2b/26X/n/04fhV3U3rMC60t80/Q=
github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.6-k3s1 h1:ot08wSXmCweEe5jTkaq4AAMlY+A2SvQRlqQi4JEJnQA=
github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.6-k3s1/go.mod h1:O3AtmT8iqccYwp/fsXdy3h0N9X/yfvRMD2XS48PJrBk=
github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.6-k3s1 h1:9ofjiqVUSRKQEMdessklWJRrgYPrBLDf9p4MMfKfZ5Q=
github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.6-k3s1/go.mod h1:/YQL/PqGdoNbC2H+w4tx2zrVdxNb541lW3PA81FdOlE=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.6-k3s1 h1:ug5WXTP2p0PVKYay4YrbLemYr0XNKqa1LxgHK8n5tGU=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.6-k3s1/go.mod h1:NcOKzNVVRhmkQmzCcBHfPPcZqgGXouc/o3Eul3saPj8=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.6-k3s1 h1:yQq7RCyBaO03uzYJlnM8wyhe20Nn03KGui68CDny4qI=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.6-k3s1/go.mod h1:pABoR/v0r2aJLFC1570FaaRJbXyiHhqdGHe5W8nk0XY=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.6-k3s1 h1:3+ND6LW6WIhVRjcpsfEupwZzsHjOUfsigomhQWhF/Ag=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.6-k3s1/go.mod h1:GLAmLACy/nOND24DRGKyPH21F89pTcevjPRxEtbLJmU=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.6-k3s1 h1:j9vNF6C374+ZbyRVArxvvw1JRHosBZe5Se8IinljIAI=
github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.6-k3s1/go.mod h1:UNQ/Ff/Mq9mmCl0MYGl3ciCEIRQr9BT+/DSsoy6/ZMI=
github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.6-k3s1 h1:3w7OKA5o91NYoUZ4dYHRmLBLGWCM6PPVzywD8bkswsA=
github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.6-k3s1/go.mod h1:eosbAJR16uuWsgirnmlt31NV+ZwZLQsMNbxiRZYbco8=
github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.6-k3s1 h1:9ZNoycDU5eIHFeF8YqcGLAlm9HeQDg8tXNvg46bFf8I=
github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.6-k3s1/go.mod h1:Raj75cxSm9NiVBoLk/lB1D4XvpBzTG4WoJ6nIH8Cyew=
github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.6-k3s1 h1:pGHcUlVbY6TBhu0QinG1e+vN33O4aGgB8nN5uujln1Q=
github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.6-k3s1/go.mod h1:R6lK1g14jiec20OVuA1ArvsCKs5th4rxGL3eUMdQmyA=
github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.6-k3s1 h1:eUs9gsRU4YZ3f9MPniiLsCLvVHj8/DtcViQH9kGh8Qg=
github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.6-k3s1/go.mod h1:xZM9EdJpWjqIWPvLiCP7vYKUEMwIgc0S8nc/MlLVK3Y=
github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.6-k3s1/go.mod h1:p8OmVbdzpawdZ/r9E1qcdJpzRirEg4OcSg8aZVWqvJo=
github.com/rancher/moq v0.0.0-20190404221404-ee5226d43009/go.mod h1:wpITyDPTi/Na5h73XkbuEf2AP9fbgrIGqqxVzFhYD6U=
github.com/rancher/nocode v0.0.0-20200630202308-cb097102c09f h1:mX6kuT3Ah9v1iEO3rgCQ7ZH9heVh67r3a9l94ua8P7U=
github.com/rancher/nocode v0.0.0-20200630202308-cb097102c09f/go.mod h1:iAAt6Amgbysi6srDJs9SxGSbG2j/JSRb/xCrnEtA69g=


@ -873,6 +873,9 @@ const (
// FieldManagerConflict is used to report when another client claims to manage this field,
// It should only be returned for a request using server-side apply.
CauseTypeFieldManagerConflict CauseType = "FieldManagerConflict"
// CauseTypeResourceVersionTooLarge is used to report that the requested resource version
// is newer than the data observed by the API server, so the request cannot be served.
CauseTypeResourceVersionTooLarge CauseType = "ResourceVersionTooLarge"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object


@ -446,7 +446,7 @@ redirectLoop:
// Only follow redirects to the same host. Otherwise, propagate the redirect response back.
if requireSameHostRedirects && location.Hostname() != originalLocation.Hostname() {
break redirectLoop
return nil, nil, fmt.Errorf("hostname mismatch: expected %s, found %s", originalLocation.Hostname(), location.Hostname())
}
// Reset the connection.
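The change above stops silently truncating the redirect chain and instead rejects any redirect that leaves the original host. A minimal sketch of the same comparison; the function name sameHostRedirect and the example URLs are illustrative, not from the source.

```go
package main

import (
	"fmt"
	"net/url"
)

// sameHostRedirect mirrors the added guard: a redirect is only acceptable
// when its hostname matches the hostname of the original location.
func sameHostRedirect(original, location *url.URL) error {
	if location.Hostname() != original.Hostname() {
		return fmt.Errorf("hostname mismatch: expected %s, found %s",
			original.Hostname(), location.Hostname())
	}
	return nil
}

func main() {
	orig, _ := url.Parse("https://10.0.0.1:10250/exec")
	redir, _ := url.Parse("https://other-host.example:10250/exec")
	fmt.Println(sameHostRedirect(orig, redir)) // hostname mismatch error
	fmt.Println(sameHostRedirect(orig, orig))  // <nil>
}
```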


@ -298,6 +298,16 @@ func (h *UpgradeAwareHandler) tryUpgrade(w http.ResponseWriter, req *http.Reques
rawResponse = headerBytes
}
// If the backend did not upgrade the request, return an error to the client. If the response was
// an error, the error is forwarded directly after the connection is hijacked. Otherwise, just
// return a generic error here.
if backendHTTPResponse.StatusCode != http.StatusSwitchingProtocols && backendHTTPResponse.StatusCode < 400 {
err := fmt.Errorf("invalid upgrade response: status code %d", backendHTTPResponse.StatusCode)
klog.Errorf("Proxy upgrade error: %v", err)
h.Responder.Error(w, req, err)
return true
}
// Once the connection is hijacked, the ErrorResponder will no longer work, so
// hijacking should be the last step in the upgrade.
requestHijacker, ok := w.(http.Hijacker)
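The guard added above fails the proxy request before hijacking when the backend replied with a non-upgrade, non-error status. A self-contained sketch of that classification, assuming only the standard library (checkUpgradeResponse is an illustrative name):

```go
package main

import (
	"fmt"
	"net/http"
)

// checkUpgradeResponse treats anything other than 101 Switching Protocols or an
// error status (>= 400) as a failed upgrade, so the caller can respond immediately
// instead of hijacking the connection first.
func checkUpgradeResponse(resp *http.Response) error {
	if resp.StatusCode != http.StatusSwitchingProtocols && resp.StatusCode < 400 {
		return fmt.Errorf("invalid upgrade response: status code %d", resp.StatusCode)
	}
	return nil
}

func main() {
	fmt.Println(checkUpgradeResponse(&http.Response{StatusCode: http.StatusOK}))                 // rejected here
	fmt.Println(checkUpgradeResponse(&http.Response{StatusCode: http.StatusSwitchingProtocols})) // <nil>
	fmt.Println(checkUpgradeResponse(&http.Response{StatusCode: http.StatusBadGateway}))         // <nil>: error body is forwarded after hijack
}
```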


@ -608,13 +608,20 @@ func (c completedConfig) New(name string, delegationTarget DelegationTarget) (*G
}
genericApiServerHookName := "generic-apiserver-start-informers"
if c.SharedInformerFactory != nil && !s.isPostStartHookRegistered(genericApiServerHookName) {
err := s.AddPostStartHook(genericApiServerHookName, func(context PostStartHookContext) error {
c.SharedInformerFactory.Start(context.StopCh)
return nil
})
if err != nil {
return nil, err
if c.SharedInformerFactory != nil {
if !s.isPostStartHookRegistered(genericApiServerHookName) {
err := s.AddPostStartHook(genericApiServerHookName, func(context PostStartHookContext) error {
c.SharedInformerFactory.Start(context.StopCh)
return nil
})
if err != nil {
return nil, err
}
// TODO: Once we get rid of /healthz consider changing this to post-start-hook.
err = s.addReadyzChecks(healthz.NewInformerSyncHealthz(c.SharedInformerFactory))
if err != nil {
return nil, err
}
}
}


@ -31,6 +31,7 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/endpoints/metrics:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server/httplog:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)


@ -29,6 +29,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/endpoints/metrics"
"k8s.io/apiserver/pkg/server/httplog"
"k8s.io/client-go/informers"
"k8s.io/klog"
)
@ -81,6 +82,39 @@ func (l *log) Check(_ *http.Request) error {
return fmt.Errorf("logging blocked")
}
type informerSync struct {
sharedInformerFactory informers.SharedInformerFactory
}
var _ HealthChecker = &informerSync{}
// NewInformerSyncHealthz returns a new HealthChecker that will pass only if all informers in the given sharedInformerFactory sync.
func NewInformerSyncHealthz(sharedInformerFactory informers.SharedInformerFactory) HealthChecker {
return &informerSync{
sharedInformerFactory: sharedInformerFactory,
}
}
func (i *informerSync) Name() string {
return "informer-sync"
}
func (i *informerSync) Check(_ *http.Request) error {
stopCh := make(chan struct{})
// Close stopCh to force checking if informers are synced now.
close(stopCh)
informersByStarted := make(map[bool][]string)
for informerType, started := range i.sharedInformerFactory.WaitForCacheSync(stopCh) {
informersByStarted[started] = append(informersByStarted[started], informerType.String())
}
if notStarted := informersByStarted[false]; len(notStarted) > 0 {
return fmt.Errorf("%d informers not started yet: %v", len(notStarted), notStarted)
}
return nil
}
// NamedCheck returns a healthz checker for the given name and function.
func NamedCheck(name string, check func(r *http.Request) error) HealthChecker {
return &healthzCheck{name, check}
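The checker above relies on a small trick: passing an already-closed stop channel makes WaitForCacheSync report the current sync state immediately instead of blocking, and it only reports on informers that have been started. A stand-alone sketch of that pattern; the fake clientset and the choice of a Pod informer are illustrative only.

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	factory := informers.NewSharedInformerFactory(fake.NewSimpleClientset(), time.Minute)
	factory.Core().V1().Pods().Informer() // register one informer so there is something to report

	runCh := make(chan struct{})
	defer close(runCh)
	factory.Start(runCh) // only started informers appear in the WaitForCacheSync result

	checkCh := make(chan struct{})
	close(checkCh) // pre-closed: report the current state, do not wait for syncing
	for informerType, synced := range factory.WaitForCacheSync(checkCh) {
		fmt.Printf("%v synced=%v\n", informerType, synced)
	}
}
```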


@ -177,7 +177,12 @@ var tooLargeResourceVersionCauseMsg = "Too large resource version"
// a minimum resource version that is larger than the largest currently available resource version for a requested resource.
func NewTooLargeResourceVersionError(minimumResourceVersion, currentRevision uint64, retrySeconds int) error {
err := errors.NewTimeoutError(fmt.Sprintf("Too large resource version: %d, current: %d", minimumResourceVersion, currentRevision), retrySeconds)
err.ErrStatus.Details.Causes = []metav1.StatusCause{{Message: tooLargeResourceVersionCauseMsg}}
err.ErrStatus.Details.Causes = []metav1.StatusCause{
{
Type: metav1.CauseTypeResourceVersionTooLarge,
Message: tooLargeResourceVersionCauseMsg,
},
}
return err
}
@ -186,15 +191,5 @@ func IsTooLargeResourceVersion(err error) bool {
if !errors.IsTimeout(err) {
return false
}
switch t := err.(type) {
case errors.APIStatus:
if d := t.Status().Details; d != nil {
for _, cause := range d.Causes {
if cause.Message == tooLargeResourceVersionCauseMsg {
return true
}
}
}
}
return false
return errors.HasStatusCause(err, metav1.CauseTypeResourceVersionTooLarge)
}
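With the typed CauseTypeResourceVersionTooLarge added earlier in this commit, callers no longer need to string-match the error message. A hedged sketch of how a client might classify such an error; isFutureResourceVersion is an illustrative helper name, not part of the library.

```go
package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// isFutureResourceVersion reports whether err says the requested resourceVersion
// is newer than anything the API server has observed yet.
func isFutureResourceVersion(err error) bool {
	return apierrors.IsTimeout(err) &&
		apierrors.HasStatusCause(err, metav1.CauseTypeResourceVersionTooLarge)
}

func main() {
	// Build an error shaped like the one NewTooLargeResourceVersionError returns.
	err := apierrors.NewTimeoutError("Too large resource version: 100, current: 10", 1)
	err.ErrStatus.Details.Causes = []metav1.StatusCause{
		{Type: metav1.CauseTypeResourceVersionTooLarge, Message: "Too large resource version"},
	}
	fmt.Println(isFutureResourceVersion(err)) // true
}
```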


@ -3,8 +3,8 @@ package version
var (
gitMajor = "1"
gitMinor = "18"
gitVersion = "v1.18.4-k3s1"
gitCommit = "52f8d69fc5bdd2ceb2ea47fb3b51190bba9e5c06"
gitVersion = "v1.18.6-k3s1"
gitCommit = "60da51f880b8e78467c08510bde06c1b3b2dedbe"
gitTreeState = "clean"
buildDate = "2020-06-17T20:02:22Z"
buildDate = "2020-07-15T21:57:06Z"
)


@ -82,9 +82,9 @@ type Reflector struct {
// observed when doing a sync with the underlying store
// it is thread safe, but not synchronized with the underlying store
lastSyncResourceVersion string
// isLastSyncResourceVersionGone is true if the previous list or watch request with lastSyncResourceVersion
// failed with an HTTP 410 (Gone) status code.
isLastSyncResourceVersionGone bool
// isLastSyncResourceVersionUnavailable is true if the previous list or watch request with
// lastSyncResourceVersion failed with an "expired" or "too large resource version" error.
isLastSyncResourceVersionUnavailable bool
// lastSyncResourceVersionMutex guards read/write access to lastSyncResourceVersion
lastSyncResourceVersionMutex sync.RWMutex
// WatchListPageSize is the requested chunk size of initial and resync watch lists.
@ -256,13 +256,14 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
}
list, paginatedResult, err = pager.List(context.Background(), options)
if isExpiredError(err) {
r.setIsLastSyncResourceVersionExpired(true)
// Retry immediately if the resource version used to list is expired.
if isExpiredError(err) || isTooLargeResourceVersionError(err) {
r.setIsLastSyncResourceVersionUnavailable(true)
// Retry immediately if the resource version used to list is unavailable.
// The pager already falls back to full list if paginated list calls fail due to an "Expired" error on
// continuation pages, but the pager might not be enabled, or the full list might fail because the
// resource version it is listing at is expired, so we need to fallback to resourceVersion="" in all
// to recover and ensure the reflector makes forward progress.
// continuation pages, but the pager might not be enabled, the full list might fail because the
// resource version it is listing at is expired or the cache may not yet be synced to the provided
// resource version. So we need to fallback to resourceVersion="" in all to recover and ensure
// the reflector makes forward progress.
list, paginatedResult, err = pager.List(context.Background(), metav1.ListOptions{ResourceVersion: r.relistResourceVersion()})
}
close(listCh)
@ -292,7 +293,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
r.paginatedResult = true
}
r.setIsLastSyncResourceVersionExpired(false) // list was successful
r.setIsLastSyncResourceVersionUnavailable(false) // list was successful
initTrace.Step("Objects listed")
listMetaInterface, err := meta.ListAccessor(list)
if err != nil {
@ -396,7 +397,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
if err != errorStopRequested {
switch {
case isExpiredError(err):
// Don't set LastSyncResourceVersionExpired - LIST call with ResourceVersion=RV already
// Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already
// has a semantic that it returns data at least as fresh as provided RV.
// So first try to LIST with setting RV to resource version of last observed object.
klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err)
@ -519,9 +520,9 @@ func (r *Reflector) relistResourceVersion() string {
r.lastSyncResourceVersionMutex.RLock()
defer r.lastSyncResourceVersionMutex.RUnlock()
if r.isLastSyncResourceVersionGone {
if r.isLastSyncResourceVersionUnavailable {
// Since this reflector makes paginated list requests, and all paginated list requests skip the watch cache
// if the lastSyncResourceVersion is expired, we set ResourceVersion="" and list again to re-establish reflector
// if the lastSyncResourceVersion is unavailable, we set ResourceVersion="" and list again to re-establish reflector
// to the latest available ResourceVersion, using a consistent read from etcd.
return ""
}
@ -533,12 +534,12 @@ func (r *Reflector) relistResourceVersion() string {
return r.lastSyncResourceVersion
}
// setIsLastSyncResourceVersionExpired sets if the last list or watch request with lastSyncResourceVersion returned a
// expired error: HTTP 410 (Gone) Status Code.
func (r *Reflector) setIsLastSyncResourceVersionExpired(isExpired bool) {
// setIsLastSyncResourceVersionUnavailable sets if the last list or watch request with lastSyncResourceVersion returned
// "expired" or "too large resource version" error.
func (r *Reflector) setIsLastSyncResourceVersionUnavailable(isUnavailable bool) {
r.lastSyncResourceVersionMutex.Lock()
defer r.lastSyncResourceVersionMutex.Unlock()
r.isLastSyncResourceVersionGone = isExpired
r.isLastSyncResourceVersionUnavailable = isUnavailable
}
func isExpiredError(err error) bool {
@ -548,3 +549,7 @@ func isExpiredError(err error) bool {
// check when we fully drop support for Kubernetes 1.17 servers from reflectors.
return apierrors.IsResourceExpired(err) || apierrors.IsGone(err)
}
func isTooLargeResourceVersionError(err error) bool {
return apierrors.HasStatusCause(err, metav1.CauseTypeResourceVersionTooLarge)
}
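The reflector now treats "expired" and "too large resource version" as the same condition: the last-synced resource version is unavailable, so the next list must drop back to resourceVersion="" (a consistent read from etcd). A simplified, illustrative condensation of relistResourceVersion under that policy (names are not from the source):

```go
package main

import "fmt"

// relistVersion condenses the reflector's fallback policy:
//   - last-synced RV known unavailable: list at "" (consistent read from etcd)
//   - very first list: "0" allows serving from the watch cache
//   - otherwise: continue from the last-synced RV
func relistVersion(lastSyncRV string, lastSyncRVUnavailable bool) string {
	if lastSyncRVUnavailable {
		return ""
	}
	if lastSyncRV == "" {
		return "0"
	}
	return lastSyncRV
}

func main() {
	fmt.Printf("%q\n", relistVersion("12345", true))  // "" - relist from etcd
	fmt.Printf("%q\n", relistVersion("", false))      // "0" - first list, cache is fine
	fmt.Printf("%q\n", relistVersion("12345", false)) // "12345"
}
```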


@ -3,8 +3,8 @@ package version
var (
gitMajor = "1"
gitMinor = "18"
gitVersion = "v1.18.4-k3s1"
gitCommit = "52f8d69fc5bdd2ceb2ea47fb3b51190bba9e5c06"
gitVersion = "v1.18.6-k3s1"
gitCommit = "60da51f880b8e78467c08510bde06c1b3b2dedbe"
gitTreeState = "clean"
buildDate = "2020-06-17T20:02:22Z"
buildDate = "2020-07-15T21:57:06Z"
)


@ -332,6 +332,7 @@ func startAttachDetachController(ctx ControllerContext) (http.Handler, bool, err
ctx.InformerFactory.Core().V1().PersistentVolumes(),
csiNodeInformer,
csiDriverInformer,
ctx.InformerFactory.Storage().V1().VolumeAttachments(),
ctx.Cloud,
plugins,
GetDynamicPluginProber(ctx.ComponentConfig.PersistentVolumeBinderController.VolumeConfiguration),


@ -4250,7 +4250,7 @@ type PodLogOptions struct {
// If this value is in the future, no logs will be returned.
// Only one of sinceSeconds or sinceTime may be specified.
SinceTime *metav1.Time
// If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line
// If true, add an RFC 3339 timestamp with 9 digits of fractional seconds at the beginning of every line
// of log output.
Timestamps bool
// If set, the number of lines from the end of the logs to show. If not specified,

View File

@ -56,6 +56,7 @@ go_test(
name = "go_default_test",
srcs = [
"daemon_controller_test.go",
"init_test.go",
"update_test.go",
],
embed = [":go_default_library"],
@ -76,6 +77,7 @@ go_test(
"//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/names:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
@ -84,6 +86,7 @@ go_test(
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)


@ -163,17 +163,8 @@ func NewDaemonSetsController(
}
daemonSetInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
ds := obj.(*apps.DaemonSet)
klog.V(4).Infof("Adding daemon set %s", ds.Name)
dsc.enqueueDaemonSet(ds)
},
UpdateFunc: func(old, cur interface{}) {
oldDS := old.(*apps.DaemonSet)
curDS := cur.(*apps.DaemonSet)
klog.V(4).Infof("Updating daemon set %s", oldDS.Name)
dsc.enqueueDaemonSet(curDS)
},
AddFunc: dsc.addDaemonset,
UpdateFunc: dsc.updateDaemonset,
DeleteFunc: dsc.deleteDaemonset,
})
dsc.dsLister = daemonSetInformer.Lister()
@ -231,22 +222,59 @@ func indexByPodNodeName(obj interface{}) ([]string, error) {
return []string{pod.Spec.NodeName}, nil
}
func (dsc *DaemonSetsController) addDaemonset(obj interface{}) {
ds := obj.(*apps.DaemonSet)
klog.V(4).Infof("Adding daemon set %s", ds.Name)
dsc.enqueueDaemonSet(ds)
}
func (dsc *DaemonSetsController) updateDaemonset(cur, old interface{}) {
oldDS := old.(*apps.DaemonSet)
curDS := cur.(*apps.DaemonSet)
// TODO: make a KEP and fix informers to always call the delete event handler on re-create
if curDS.UID != oldDS.UID {
key, err := controller.KeyFunc(oldDS)
if err != nil {
utilruntime.HandleError(fmt.Errorf("couldn't get key for object %#v: %v", oldDS, err))
return
}
dsc.deleteDaemonset(cache.DeletedFinalStateUnknown{
Key: key,
Obj: oldDS,
})
}
klog.V(4).Infof("Updating daemon set %s", oldDS.Name)
dsc.enqueueDaemonSet(curDS)
}
func (dsc *DaemonSetsController) deleteDaemonset(obj interface{}) {
ds, ok := obj.(*apps.DaemonSet)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %#v", obj))
return
}
ds, ok = tombstone.Obj.(*apps.DaemonSet)
if !ok {
utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a DaemonSet %#v", obj))
utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a DaemonSet %#v", obj))
return
}
}
klog.V(4).Infof("Deleting daemon set %s", ds.Name)
dsc.enqueueDaemonSet(ds)
key, err := controller.KeyFunc(ds)
if err != nil {
utilruntime.HandleError(fmt.Errorf("couldn't get key for object %#v: %v", ds, err))
return
}
// Delete expectations for the DaemonSet so if we create a new one with the same name it starts clean
dsc.expectations.DeleteExpectations(key)
dsc.queue.Add(key)
}
// Run begins watching and syncing daemon sets.
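Both the new deleteDaemonset handler and the synthetic delete issued from updateDaemonset have to cope with DeleteFunc receiving either the object itself or a cache.DeletedFinalStateUnknown tombstone. A self-contained sketch of that unwrapping, using the public apps/v1 types rather than the controller's internal ones; daemonSetFromDeleteEvent is an illustrative helper name.

```go
package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

// daemonSetFromDeleteEvent unwraps the argument passed to an informer DeleteFunc,
// which may be the DaemonSet itself or a tombstone holding its last known state.
func daemonSetFromDeleteEvent(obj interface{}) (*appsv1.DaemonSet, error) {
	if ds, ok := obj.(*appsv1.DaemonSet); ok {
		return ds, nil
	}
	tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
	if !ok {
		return nil, fmt.Errorf("couldn't get object from tombstone %#v", obj)
	}
	ds, ok := tombstone.Obj.(*appsv1.DaemonSet)
	if !ok {
		return nil, fmt.Errorf("tombstone contained object that is not a DaemonSet %#v", obj)
	}
	return ds, nil
}

func main() {
	ds := &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "demo"}}
	got, err := daemonSetFromDeleteEvent(cache.DeletedFinalStateUnknown{Key: "default/demo", Obj: ds})
	fmt.Println(got.Name, err) // demo <nil>
}
```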


@ -110,6 +110,7 @@ func NewAttachDetachController(
pvInformer coreinformers.PersistentVolumeInformer,
csiNodeInformer storageinformersv1.CSINodeInformer,
csiDriverInformer storageinformersv1.CSIDriverInformer,
volumeAttachmentInformer storageinformersv1.VolumeAttachmentInformer,
cloud cloudprovider.Interface,
plugins []volume.VolumePlugin,
prober volume.DynamicPluginProber,
@ -141,6 +142,9 @@ func NewAttachDetachController(
adc.csiDriverLister = csiDriverInformer.Lister()
adc.csiDriversSynced = csiDriverInformer.Informer().HasSynced
adc.volumeAttachmentLister = volumeAttachmentInformer.Lister()
adc.volumeAttachmentSynced = volumeAttachmentInformer.Informer().HasSynced
if err := adc.volumePluginMgr.InitPlugins(plugins, prober, adc); err != nil {
return nil, fmt.Errorf("Could not initialize volume plugins for Attach/Detach Controller: %+v", err)
}
@ -280,6 +284,12 @@ type attachDetachController struct {
csiDriverLister storagelistersv1.CSIDriverLister
csiDriversSynced kcache.InformerSynced
// volumeAttachmentLister is the shared volumeAttachment lister used to fetch and store
// VolumeAttachment objects from the API server. It is shared with other controllers
// and therefore the VolumeAttachment objects in its store should be treated as immutable.
volumeAttachmentLister storagelistersv1.VolumeAttachmentLister
volumeAttachmentSynced kcache.InformerSynced
// cloud provider used by volume host
cloud cloudprovider.Interface
@ -345,6 +355,9 @@ func (adc *attachDetachController) Run(stopCh <-chan struct{}) {
if adc.csiDriversSynced != nil {
synced = append(synced, adc.csiDriversSynced)
}
if adc.volumeAttachmentSynced != nil {
synced = append(synced, adc.volumeAttachmentSynced)
}
if !kcache.WaitForNamedCacheSync("attach detach", stopCh, synced...) {
return
@ -697,6 +710,10 @@ func (adc *attachDetachController) IsAttachDetachController() bool {
return true
}
func (adc *attachDetachController) VolumeAttachmentLister() storagelistersv1.VolumeAttachmentLister {
return adc.volumeAttachmentLister
}
// VolumeHost implementation
// This is an unfortunate requirement of the current factoring of volume plugin
// initializing code. It requires kubelet specific methods used by the mounting

View File

@ -58,6 +58,10 @@ type CachingDockerConfigProvider struct {
Provider DockerConfigProvider
Lifetime time.Duration
// ShouldCache is an optional function that returns true if the specific config should be cached.
// If nil, all configs are treated as cacheable.
ShouldCache func(DockerConfig) bool
// cache fields
cacheDockerConfig DockerConfig
expiration time.Time
@ -96,7 +100,10 @@ func (d *CachingDockerConfigProvider) Provide(image string) DockerConfig {
}
klog.V(2).Infof("Refreshing cache for provider: %v", reflect.TypeOf(d.Provider).String())
d.cacheDockerConfig = d.Provider.Provide(image)
d.expiration = time.Now().Add(d.Lifetime)
return d.cacheDockerConfig
config := d.Provider.Provide(image)
if d.ShouldCache == nil || d.ShouldCache(config) {
d.cacheDockerConfig = config
d.expiration = time.Now().Add(d.Lifetime)
}
return config
}
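The new ShouldCache hook lets a provider skip caching results it considers transient, such as an empty credential lookup, while still returning them to the caller. A stand-alone analog of that behavior with local stand-in types; the config type here only approximates the provider's DockerConfig map and is not the real API.

```go
package main

import (
	"fmt"
	"time"
)

// config stands in for the provider's registry -> credentials map.
type config map[string]string

type cachingProvider struct {
	provide     func() config
	lifetime    time.Duration
	shouldCache func(config) bool // nil means "cache everything", as before this change

	cached  config
	expires time.Time
}

func (p *cachingProvider) Provide() config {
	if time.Now().Before(p.expires) {
		return p.cached
	}
	c := p.provide()
	if p.shouldCache == nil || p.shouldCache(c) {
		p.cached = c
		p.expires = time.Now().Add(p.lifetime)
	}
	return c // an uncacheable result is still returned, just not remembered
}

func main() {
	p := &cachingProvider{
		provide:     func() config { return config{} }, // e.g. a failed metadata lookup
		lifetime:    time.Minute,
		shouldCache: func(c config) bool { return len(c) > 0 },
	}
	p.Provide()
	fmt.Println(p.expires.IsZero()) // true: the empty result was not cached
}
```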


@ -53,8 +53,9 @@ func (ds *dockerService) ListContainerStats(ctx context.Context, r *runtimeapi.L
if err != nil {
return nil, err
}
stats = append(stats, containerStats)
if containerStats != nil {
stats = append(stats, containerStats)
}
}
return &runtimeapi.ListContainerStatsResponse{Stats: stats}, nil


@ -35,7 +35,13 @@ func (ds *dockerService) getContainerStats(containerID string) (*runtimeapi.Cont
hcsshim_container, err := hcsshim.OpenContainer(containerID)
if err != nil {
return nil, err
// As we moved from using Docker stats to hcsshim directly, we may query HCS with already exited container IDs.
// That will typically happen with init-containers in Exited state. Docker still knows about them but the HCS does not.
// As we don't want to block stats retrieval for other containers, we only log errors.
if !hcsshim.IsNotExist(err) && !hcsshim.IsAlreadyStopped(err) {
klog.Errorf("Error opening container (stats will be missing) '%s': %v", containerID, err)
}
return nil, nil
}
defer func() {
closeErr := hcsshim_container.Close()


@ -65,6 +65,7 @@ go_library(
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",


@ -26,6 +26,7 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/record"
@ -98,6 +99,8 @@ type managerImpl struct {
thresholdNotifiers []ThresholdNotifier
// thresholdsLastUpdated is the last time the thresholdNotifiers were updated.
thresholdsLastUpdated time.Time
// etcHostsPath is a function that will get the etc-hosts file's path for a pod given its UID
etcHostsPath func(podUID types.UID) string
}
// ensure it implements the required interface
@ -114,6 +117,7 @@ func NewManager(
recorder record.EventRecorder,
nodeRef *v1.ObjectReference,
clock clock.Clock,
etcHostsPath func(types.UID) string,
) (Manager, lifecycle.PodAdmitHandler) {
manager := &managerImpl{
clock: clock,
@ -129,6 +133,7 @@ func NewManager(
thresholdsFirstObservedAt: thresholdsObservedAt{},
dedicatedImageFs: nil,
thresholdNotifiers: []ThresholdNotifier{},
etcHostsPath: etcHostsPath,
}
return manager, manager
}
@ -515,7 +520,7 @@ func (m *managerImpl) podEphemeralStorageLimitEviction(podStats statsapi.PodStat
} else {
fsStatsSet = []fsStatsType{fsStatsRoot, fsStatsLogs, fsStatsLocalVolumeSource}
}
podEphemeralUsage, err := podLocalEphemeralStorageUsage(podStats, pod, fsStatsSet)
podEphemeralUsage, err := podLocalEphemeralStorageUsage(podStats, pod, fsStatsSet, m.etcHostsPath(pod.UID))
if err != nil {
klog.Errorf("eviction manager: error getting pod disk usage %v", err)
return false


@ -18,6 +18,7 @@ package eviction
import (
"fmt"
"os"
"sort"
"strconv"
"strings"
@ -415,7 +416,7 @@ func localEphemeralVolumeNames(pod *v1.Pod) []string {
}
// podLocalEphemeralStorageUsage aggregates pod local ephemeral storage usage and inode consumption for the specified stats to measure.
func podLocalEphemeralStorageUsage(podStats statsapi.PodStats, pod *v1.Pod, statsToMeasure []fsStatsType) (v1.ResourceList, error) {
func podLocalEphemeralStorageUsage(podStats statsapi.PodStats, pod *v1.Pod, statsToMeasure []fsStatsType, etcHostsPath string) (v1.ResourceList, error) {
disk := resource.Quantity{Format: resource.BinarySI}
inodes := resource.Quantity{Format: resource.DecimalSI}
@ -429,6 +430,12 @@ func podLocalEphemeralStorageUsage(podStats statsapi.PodStats, pod *v1.Pod, stat
disk.Add(podLocalVolumeUsageList[v1.ResourceEphemeralStorage])
inodes.Add(podLocalVolumeUsageList[resourceInodes])
}
if len(etcHostsPath) > 0 {
if stat, err := os.Stat(etcHostsPath); err == nil {
disk.Add(*resource.NewQuantity(int64(stat.Size()), resource.BinarySI))
inodes.Add(*resource.NewQuantity(int64(1), resource.DecimalSI))
}
}
return v1.ResourceList{
v1.ResourceEphemeralStorage: disk,
resourceInodes: inodes,
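The added block folds the pod's generated etc-hosts file into ephemeral-storage accounting: its byte size plus one inode. A minimal sketch of that accounting with the resource API; the /etc/hosts path below is only a stand-in for the per-pod file returned by the etcHostsPath function.

```go
package main

import (
	"fmt"
	"os"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	disk := resource.Quantity{Format: resource.BinarySI}
	inodes := resource.Quantity{Format: resource.DecimalSI}

	etcHostsPath := "/etc/hosts" // stand-in for the pod's generated etc-hosts file
	if stat, err := os.Stat(etcHostsPath); err == nil {
		disk.Add(*resource.NewQuantity(stat.Size(), resource.BinarySI))
		inodes.Add(*resource.NewQuantity(1, resource.DecimalSI))
	}
	fmt.Printf("ephemeral-storage=%s inodes=%s\n", disk.String(), inodes.String())
}
```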


@ -843,8 +843,9 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
klet.backOff = flowcontrol.NewBackOff(backOffPeriod, MaxContainerBackOff)
klet.podKillingCh = make(chan *kubecontainer.PodPair, podKillingChannelCapacity)
etcHostsPathFunc := func(podUID types.UID) string { return getEtcHostsPath(klet.getPodDir(podUID)) }
// setup eviction manager
evictionManager, evictionAdmitHandler := eviction.NewManager(klet.resourceAnalyzer, evictionConfig, killPodNow(klet.podWorkers, kubeDeps.Recorder), klet.podManager.GetMirrorPodByPod, klet.imageManager, klet.containerGC, kubeDeps.Recorder, nodeRef, klet.clock)
evictionManager, evictionAdmitHandler := eviction.NewManager(klet.resourceAnalyzer, evictionConfig, killPodNow(klet.podWorkers, kubeDeps.Recorder), klet.podManager.GetMirrorPodByPod, klet.imageManager, klet.containerGC, kubeDeps.Recorder, nodeRef, klet.clock, etcHostsPathFunc)
klet.evictionManager = evictionManager
klet.admitHandlers.AddPodAdmitHandler(evictionAdmitHandler)


@ -62,7 +62,7 @@ func (kl *Kubelet) syncNetworkUtil() {
klog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, KubeMarkDropChain, err)
return
}
if _, err := kl.iptClient.EnsureRule(utiliptables.Append, utiliptables.TableNAT, KubeMarkDropChain, "-j", "MARK", "--set-xmark", dropMark); err != nil {
if _, err := kl.iptClient.EnsureRule(utiliptables.Append, utiliptables.TableNAT, KubeMarkDropChain, "-j", "MARK", "--or-mark", dropMark); err != nil {
klog.Errorf("Failed to ensure marking rule for %v: %v", KubeMarkDropChain, err)
return
}
@ -72,7 +72,7 @@ func (kl *Kubelet) syncNetworkUtil() {
}
if _, err := kl.iptClient.EnsureRule(utiliptables.Append, utiliptables.TableFilter, KubeFirewallChain,
"-m", "comment", "--comment", "kubernetes firewall for dropping marked packets",
"-m", "mark", "--mark", dropMark,
"-m", "mark", "--mark", fmt.Sprintf("%s/%s", dropMark, dropMark),
"-j", "DROP"); err != nil {
klog.Errorf("Failed to ensure rule to drop packet marked by %v in %v chain %v: %v", KubeMarkDropChain, utiliptables.TableFilter, KubeFirewallChain, err)
return
@ -112,7 +112,7 @@ func (kl *Kubelet) syncNetworkUtil() {
klog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, KubePostroutingChain, err)
return
}
if _, err := kl.iptClient.EnsureRule(utiliptables.Append, utiliptables.TableNAT, KubeMarkMasqChain, "-j", "MARK", "--set-xmark", masqueradeMark); err != nil {
if _, err := kl.iptClient.EnsureRule(utiliptables.Append, utiliptables.TableNAT, KubeMarkMasqChain, "-j", "MARK", "--or-mark", masqueradeMark); err != nil {
klog.Errorf("Failed to ensure marking rule for %v: %v", KubeMarkMasqChain, err)
return
}
@ -121,12 +121,26 @@ func (kl *Kubelet) syncNetworkUtil() {
klog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", utiliptables.TableNAT, utiliptables.ChainPostrouting, KubePostroutingChain, err)
return
}
// Establish the masquerading rule.
// Set up KUBE-POSTROUTING to unmark and masquerade marked packets
// NB: THIS MUST MATCH the corresponding code in the iptables and ipvs
// modes of kube-proxy
if _, err := kl.iptClient.EnsureRule(utiliptables.Append, utiliptables.TableNAT, KubePostroutingChain,
"-m", "mark", "!", "--mark", fmt.Sprintf("%s/%s", masqueradeMark, masqueradeMark),
"-j", "RETURN"); err != nil {
klog.Errorf("Failed to ensure filtering rule for %v: %v", KubePostroutingChain, err)
return
}
// Clear the mark to avoid re-masquerading if the packet re-traverses the network stack.
// We know the mark bit is currently set so we can use --xor-mark to clear it (without needing
// to Sprintf another bitmask).
if _, err := kl.iptClient.EnsureRule(utiliptables.Append, utiliptables.TableNAT, KubePostroutingChain,
"-j", "MARK", "--xor-mark", masqueradeMark); err != nil {
klog.Errorf("Failed to ensure unmarking rule for %v: %v", KubePostroutingChain, err)
return
}
masqRule := []string{
"-m", "comment", "--comment", "kubernetes service traffic requiring SNAT",
"-m", "mark", "--mark", masqueradeMark,
"-j", "MASQUERADE",
}
if kl.iptClient.HasRandomFully() {
@ -144,5 +158,5 @@ func (kl *Kubelet) syncNetworkUtil() {
// getIPTablesMark returns the fwmark given the bit
func getIPTablesMark(bit int) string {
value := 1 << uint(bit)
return fmt.Sprintf("%#08x/%#08x", value, value)
return fmt.Sprintf("%#08x", value)
}
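getIPTablesMark now returns the bare fwmark value; rules that need a masked match append "/<mark>" themselves, setting the bit uses --or-mark, and clearing it uses --xor-mark so other users of the fwmark are left alone. A small sketch of how those strings fit together; bit 14 is kube-proxy's default masquerade bit, and the rule fragments are illustrative rather than generated by this code path.

```go
package main

import "fmt"

// getMark mirrors the updated getIPTablesMark: just the value, no "/mask" suffix.
func getMark(bit int) string {
	value := 1 << uint(bit)
	return fmt.Sprintf("%#08x", value)
}

func main() {
	masq := getMark(14) // "0x004000"
	fmt.Println("set the bit:   -j MARK --or-mark " + masq)
	fmt.Println("match the bit: -m mark --mark " + masq + "/" + masq)
	fmt.Println("clear the bit: -j MARK --xor-mark " + masq)
}
```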


@ -291,11 +291,16 @@ func translateMountPropagation(mountMode *v1.MountPropagationMode) (runtimeapi.M
}
}
// getEtcHostsPath returns the full host-side path to a pod's generated /etc/hosts file
func getEtcHostsPath(podDir string) string {
return path.Join(podDir, "etc-hosts")
}
// makeHostsMount makes the mountpoint for the hosts file that the containers
// in a pod are injected with. podIPs is provided instead of podIP as podIPs
// are present even if dual-stack feature flag is not enabled.
func makeHostsMount(podDir string, podIPs []string, hostName, hostDomainName string, hostAliases []v1.HostAlias, useHostNetwork bool) (*kubecontainer.Mount, error) {
hostsFilePath := path.Join(podDir, "etc-hosts")
hostsFilePath := getEtcHostsPath(podDir)
if err := ensureHostsFile(hostsFilePath, podIPs, hostName, hostDomainName, hostAliases, useHostNetwork); err != nil {
return nil, err
}


@ -6,6 +6,7 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs",
visibility = ["//visibility:public"],
deps = [
"//pkg/kubelet/types:go_default_library",
"//pkg/util/tail:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/cri-api/pkg/apis:go_default_library",


@ -36,6 +36,7 @@ import (
"k8s.io/api/core/v1"
internalapi "k8s.io/cri-api/pkg/apis"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/util/tail"
)
@ -48,8 +49,10 @@ import (
// TODO(random-liu): Support log rotation.
const (
// timeFormat is the time format used in the log.
timeFormat = time.RFC3339Nano
// timeFormatOut is the format for writing timestamps to output.
timeFormatOut = types.RFC3339NanoFixed
// timeFormatIn is the format for parsing timestamps from other logs.
timeFormatIn = types.RFC3339NanoLenient
// logForceCheckPeriod is the period to check for a new read
logForceCheckPeriod = 1 * time.Second
@ -129,9 +132,9 @@ func parseCRILog(log []byte, msg *logMessage) error {
if idx < 0 {
return fmt.Errorf("timestamp is not found")
}
msg.timestamp, err = time.Parse(timeFormat, string(log[:idx]))
msg.timestamp, err = time.Parse(timeFormatIn, string(log[:idx]))
if err != nil {
return fmt.Errorf("unexpected timestamp format %q: %v", timeFormat, err)
return fmt.Errorf("unexpected timestamp format %q: %v", timeFormatIn, err)
}
// Parse stream type
@ -228,7 +231,7 @@ func (w *logWriter) write(msg *logMessage) error {
}
line := msg.log
if w.opts.timestamp {
prefix := append([]byte(msg.timestamp.Format(timeFormat)), delimiter[0])
prefix := append([]byte(msg.timestamp.Format(timeFormatOut)), delimiter[0])
line = append(prefix, line...)
}
// If the line is longer than the remaining bytes, cut it.


@ -236,18 +236,6 @@ func (m *manager) CleanupPods(desiredPods map[types.UID]sets.Empty) {
func (m *manager) UpdatePodStatus(podUID types.UID, podStatus *v1.PodStatus) {
for i, c := range podStatus.ContainerStatuses {
var ready bool
if c.State.Running == nil {
ready = false
} else if result, ok := m.readinessManager.Get(kubecontainer.ParseContainerID(c.ContainerID)); ok {
ready = result == results.Success
} else {
// The check whether there is a probe which hasn't run yet.
_, exists := m.getWorker(podUID, c.Name, readiness)
ready = !exists
}
podStatus.ContainerStatuses[i].Ready = ready
var started bool
if c.State.Running == nil {
started = false
@ -262,6 +250,20 @@ func (m *manager) UpdatePodStatus(podUID types.UID, podStatus *v1.PodStatus) {
started = !exists
}
podStatus.ContainerStatuses[i].Started = &started
if started {
var ready bool
if c.State.Running == nil {
ready = false
} else if result, ok := m.readinessManager.Get(kubecontainer.ParseContainerID(c.ContainerID)); ok {
ready = result == results.Success
} else {
// The check whether there is a probe which hasn't run yet.
_, exists := m.getWorker(podUID, c.Name, readiness)
ready = !exists
}
podStatus.ContainerStatuses[i].Ready = ready
}
}
// init containers are ready if they have exited with success or if a readiness probe has
// succeeded.


@ -29,4 +29,9 @@ const (
SystemReservedEnforcementKey = "system-reserved"
KubeReservedEnforcementKey = "kube-reserved"
NodeAllocatableNoneKey = "none"
// fixed width version of time.RFC3339Nano
RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
// variable width RFC3339 time format for lenient parsing of strings into timestamps
RFC3339NanoLenient = "2006-01-02T15:04:05.999999999Z07:00"
)
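The pair of layouts separates writing from parsing: the fixed-width form always emits nine fractional digits (stable column width, lexically sortable log lines), while the lenient form tolerates timestamps whose trailing zeros were dropped. A quick stand-alone illustration of the difference, using local constants with the same layout strings:

```go
package main

import (
	"fmt"
	"time"
)

const (
	rfc3339NanoFixed   = "2006-01-02T15:04:05.000000000Z07:00" // always 9 fractional digits
	rfc3339NanoLenient = "2006-01-02T15:04:05.999999999Z07:00" // trailing zeros omitted
)

func main() {
	t := time.Date(2020, 7, 15, 21, 57, 6, 120000000, time.UTC)
	fmt.Println(t.Format(rfc3339NanoFixed))   // 2020-07-15T21:57:06.120000000Z
	fmt.Println(t.Format(rfc3339NanoLenient)) // 2020-07-15T21:57:06.12Z

	// The lenient layout accepts both renderings, which is why it is used on the read path.
	for _, s := range []string{"2020-07-15T21:57:06.120000000Z", "2020-07-15T21:57:06.12Z"} {
		_, err := time.Parse(rfc3339NanoLenient, s)
		fmt.Println(s, err == nil)
	}
}
```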


@ -43,10 +43,10 @@ func NewTimestamp() *Timestamp {
return &Timestamp{time.Now()}
}
// ConvertToTimestamp takes a string, parses it using the RFC3339Nano layout,
// ConvertToTimestamp takes a string, parses it using the RFC3339NanoLenient layout,
// and converts it to a Timestamp object.
func ConvertToTimestamp(timeString string) *Timestamp {
parsed, _ := time.Parse(time.RFC3339Nano, timeString)
parsed, _ := time.Parse(RFC3339NanoLenient, timeString)
return &Timestamp{parsed}
}
@ -55,10 +55,10 @@ func (t *Timestamp) Get() time.Time {
return t.time
}
// GetString returns the time in the string format using the RFC3339Nano
// GetString returns the time in the string format using the RFC3339NanoFixed
// layout.
func (t *Timestamp) GetString() string {
return t.time.Format(time.RFC3339Nano)
return t.time.Format(RFC3339NanoFixed)
}
// A type to help sort container statuses based on container names.

View File

@ -284,7 +284,7 @@ func NewProxier(ipt utiliptables.Interface,
// Generate the masquerade mark to use for SNAT rules.
masqueradeValue := 1 << uint(masqueradeBit)
masqueradeMark := fmt.Sprintf("%#08x/%#08x", masqueradeValue, masqueradeValue)
masqueradeMark := fmt.Sprintf("%#08x", masqueradeValue)
endpointSlicesEnabled := utilfeature.DefaultFeatureGate.Enabled(features.EndpointSliceProxying)
@ -900,10 +900,20 @@ func (proxier *Proxier) syncProxyRules() {
// this so that it is easier to flush and change, for example if the mark
// value should ever change.
// NB: THIS MUST MATCH the corresponding code in the kubelet
writeLine(proxier.natRules, []string{
"-A", string(kubePostroutingChain),
"-m", "mark", "!", "--mark", fmt.Sprintf("%s/%s", proxier.masqueradeMark, proxier.masqueradeMark),
"-j", "RETURN",
}...)
// Clear the mark to avoid re-masquerading if the packet re-traverses the network stack.
writeLine(proxier.natRules, []string{
"-A", string(kubePostroutingChain),
// XOR proxier.masqueradeMark to unset it
"-j", "MARK", "--xor-mark", proxier.masqueradeMark,
}...)
masqRule := []string{
"-A", string(kubePostroutingChain),
"-m", "comment", "--comment", `"kubernetes service traffic requiring SNAT"`,
"-m", "mark", "--mark", proxier.masqueradeMark,
"-j", "MASQUERADE",
}
if proxier.iptables.HasRandomFully() {
@ -919,7 +929,7 @@ func (proxier *Proxier) syncProxyRules() {
// value should ever change.
writeLine(proxier.natRules, []string{
"-A", string(KubeMarkMasqChain),
"-j", "MARK", "--set-xmark", proxier.masqueradeMark,
"-j", "MARK", "--or-mark", proxier.masqueradeMark,
}...)
// Accumulate NAT chains to keep.
@ -1505,7 +1515,7 @@ func (proxier *Proxier) syncProxyRules() {
writeLine(proxier.filterRules,
"-A", string(kubeForwardChain),
"-m", "comment", "--comment", `"kubernetes forwarding rules"`,
"-m", "mark", "--mark", proxier.masqueradeMark,
"-m", "mark", "--mark", fmt.Sprintf("%s/%s", proxier.masqueradeMark, proxier.masqueradeMark),
"-j", "ACCEPT",
)


@ -419,7 +419,7 @@ func NewProxier(ipt utiliptables.Interface,
// Generate the masquerade mark to use for SNAT rules.
masqueradeValue := 1 << uint(masqueradeBit)
masqueradeMark := fmt.Sprintf("%#08x/%#08x", masqueradeValue, masqueradeValue)
masqueradeMark := fmt.Sprintf("%#08x", masqueradeValue)
isIPv6 := utilnet.IsIPv6(nodeIP)
@ -1751,7 +1751,7 @@ func (proxier *Proxier) writeIptablesRules() {
writeLine(proxier.filterRules,
"-A", string(KubeForwardChain),
"-m", "comment", "--comment", `"kubernetes forwarding rules"`,
"-m", "mark", "--mark", proxier.masqueradeMark,
"-m", "mark", "--mark", fmt.Sprintf("%s/%s", proxier.masqueradeMark, proxier.masqueradeMark),
"-j", "ACCEPT",
)
@ -1773,6 +1773,39 @@ func (proxier *Proxier) writeIptablesRules() {
"-j", "ACCEPT",
)
// Install the kubernetes-specific postrouting rules. We use a whole chain for
// this so that it is easier to flush and change, for example if the mark
// value should ever change.
// NB: THIS MUST MATCH the corresponding code in the kubelet
writeLine(proxier.natRules, []string{
"-A", string(kubePostroutingChain),
"-m", "mark", "!", "--mark", fmt.Sprintf("%s/%s", proxier.masqueradeMark, proxier.masqueradeMark),
"-j", "RETURN",
}...)
// Clear the mark to avoid re-masquerading if the packet re-traverses the network stack.
writeLine(proxier.natRules, []string{
"-A", string(kubePostroutingChain),
// XOR proxier.masqueradeMark to unset it
"-j", "MARK", "--xor-mark", proxier.masqueradeMark,
}...)
masqRule := []string{
"-A", string(kubePostroutingChain),
"-m", "comment", "--comment", `"kubernetes service traffic requiring SNAT"`,
"-j", "MASQUERADE",
}
if proxier.iptables.HasRandomFully() {
masqRule = append(masqRule, "--random-fully")
}
writeLine(proxier.natRules, masqRule...)
// Install the kubernetes-specific masquerade mark rule. We use a whole chain for
// this so that it is easier to flush and change, for example if the mark
// value should ever change.
writeLine(proxier.natRules, []string{
"-A", string(KubeMarkMasqChain),
"-j", "MARK", "--or-mark", proxier.masqueradeMark,
}...)
// Write the end-of-table markers.
writeLine(proxier.filterRules, "COMMIT")
writeLine(proxier.natRules, "COMMIT")
@ -1831,31 +1864,6 @@ func (proxier *Proxier) createAndLinkeKubeChain() {
}
}
// Install the kubernetes-specific postrouting rules. We use a whole chain for
// this so that it is easier to flush and change, for example if the mark
// value should ever change.
// NB: THIS MUST MATCH the corresponding code in the kubelet
masqRule := []string{
"-A", string(kubePostroutingChain),
"-m", "comment", "--comment", `"kubernetes service traffic requiring SNAT"`,
"-m", "mark", "--mark", proxier.masqueradeMark,
"-j", "MASQUERADE",
}
if proxier.iptables.HasRandomFully() {
masqRule = append(masqRule, "--random-fully")
klog.V(3).Info("Using `--random-fully` in the MASQUERADE rule for iptables")
} else {
klog.V(2).Info("Not using `--random-fully` in the MASQUERADE rule for iptables because the local version of iptables does not support it")
}
writeLine(proxier.natRules, masqRule...)
// Install the kubernetes-specific masquerade mark rule. We use a whole chain for
// this so that it is easier to flush and change, for example if the mark
// value should ever change.
writeLine(proxier.natRules, []string{
"-A", string(KubeMarkMasqChain),
"-j", "MARK", "--set-xmark", proxier.masqueradeMark,
}...)
}
// getExistingChains get iptables-save output so we can check for existing chains and rules.


@ -79,8 +79,10 @@ go_test(
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/informers/storage/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library",
"//staging/src/k8s.io/client-go/testing:go_default_library",
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
"//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",


@ -197,8 +197,19 @@ func (c *csiAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.No
}
attachID := getAttachmentName(volumeHandle, driverName, string(nodeName))
var attach *storage.VolumeAttachment
if c.plugin.volumeAttachmentLister != nil {
attach, err = c.plugin.volumeAttachmentLister.Get(attachID)
if err == nil {
attached[spec] = attach.Status.Attached
continue
}
klog.V(4).Info(log("attacher.VolumesAreAttached failed in AttachmentLister for attach.ID=%v: %v. Probing the API server.", attachID, err))
}
// The cache lookup is not set up or the object is not found in the cache.
// Get the object from the API server.
klog.V(4).Info(log("probing attachment status for VolumeAttachment %v", attachID))
attach, err := c.k8s.StorageV1().VolumeAttachments().Get(context.TODO(), attachID, meta.GetOptions{})
attach, err = c.k8s.StorageV1().VolumeAttachments().Get(context.TODO(), attachID, meta.GetOptions{})
if err != nil {
attached[spec] = false
klog.Error(log("attacher.VolumesAreAttached failed for attach.ID=%v: %v", attachID, err))

View File

@ -59,9 +59,10 @@ const (
)
type csiPlugin struct {
host volume.VolumeHost
blockEnabled bool
csiDriverLister storagelisters.CSIDriverLister
host volume.VolumeHost
blockEnabled bool
csiDriverLister storagelisters.CSIDriverLister
volumeAttachmentLister storagelisters.VolumeAttachmentLister
}
// ProbeVolumePlugins returns implemented plugins
@ -186,13 +187,17 @@ func (p *csiPlugin) Init(host volume.VolumeHost) error {
if csiClient == nil {
klog.Warning(log("kubeclient not set, assuming standalone kubelet"))
} else {
// set CSIDriverLister
// set CSIDriverLister and volumeAttachmentLister
adcHost, ok := host.(volume.AttachDetachVolumeHost)
if ok {
p.csiDriverLister = adcHost.CSIDriverLister()
if p.csiDriverLister == nil {
klog.Error(log("CSIDriverLister not found on AttachDetachVolumeHost"))
}
p.volumeAttachmentLister = adcHost.VolumeAttachmentLister()
if p.volumeAttachmentLister == nil {
klog.Error(log("VolumeAttachmentLister not found on AttachDetachVolumeHost"))
}
}
kletHost, ok := host.(volume.KubeletVolumeHost)
if ok {
@ -200,6 +205,8 @@ func (p *csiPlugin) Init(host volume.VolumeHost) error {
if p.csiDriverLister == nil {
klog.Error(log("CSIDriverLister not found on KubeletVolumeHost"))
}
// We don't run the volumeAttachmentLister in the kubelet context
p.volumeAttachmentLister = nil
}
}
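In short, the plugin only keeps a VolumeAttachment lister when it runs inside the attach/detach controller; in the kubelet there is no VolumeAttachment informer, so the field stays nil and callers fall back to the API server. A rough sketch of that selection (the function name is illustrative, not part of the commit):

package csisketch

import (
	storagelisters "k8s.io/client-go/listers/storage/v1"
	"k8s.io/kubernetes/pkg/volume"
)

// pickVolumeAttachmentLister returns a lister only when the host is the
// attach/detach controller; the kubelet path deliberately gets nil.
func pickVolumeAttachmentLister(host volume.VolumeHost) storagelisters.VolumeAttachmentLister {
	if adcHost, ok := host.(volume.AttachDetachVolumeHost); ok {
		return adcHost.VolumeAttachmentLister()
	}
	return nil
}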

View File

@ -351,6 +351,8 @@ type AttachDetachVolumeHost interface {
// CSIDriverLister returns the informer lister for the CSIDriver API Object
CSIDriverLister() storagelistersv1.CSIDriverLister
// VolumeAttachmentLister returns the informer lister for the VolumeAttachment API Object
VolumeAttachmentLister() storagelistersv1.VolumeAttachmentLister
// IsAttachDetachController is an interface marker to strictly tie AttachDetachVolumeHost
// to the attachDetachController
IsAttachDetachController() bool
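The new VolumeAttachmentLister method implies the attach/detach controller host exposes a lister backed by a running informer. A minimal sketch of building such a lister with client-go (the resync period, helper name, and synchronous cache wait are assumptions, not this controller's actual wiring):

package csisketch

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	storagelisters "k8s.io/client-go/listers/storage/v1"
)

// newVolumeAttachmentLister builds a VolumeAttachment lister from a shared
// informer factory; the factory must be started and its caches synced before
// the lister returns useful results.
func newVolumeAttachmentLister(client kubernetes.Interface, stopCh <-chan struct{}) storagelisters.VolumeAttachmentLister {
	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
	lister := factory.Storage().V1().VolumeAttachments().Lister()
	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)
	return lister
}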

View File

@ -44,7 +44,8 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri
return err
}
disks := filterDetachingDisks(*vm.StorageProfile.DataDisks)
disks := make([]compute.DataDisk, len(*vm.StorageProfile.DataDisks))
copy(disks, *vm.StorageProfile.DataDisks)
if isManagedDisk {
managedDisk := &compute.ManagedDiskParameters{ID: &diskURI}
@ -131,7 +132,8 @@ func (as *availabilitySet) DetachDisk(diskName, diskURI string, nodeName types.N
return err
}
disks := filterDetachingDisks(*vm.StorageProfile.DataDisks)
disks := make([]compute.DataDisk, len(*vm.StorageProfile.DataDisks))
copy(disks, *vm.StorageProfile.DataDisks)
bFoundDisk := false
for i, disk := range disks {
@ -140,7 +142,7 @@ func (as *availabilitySet) DetachDisk(diskName, diskURI string, nodeName types.N
(disk.ManagedDisk != nil && diskURI != "" && strings.EqualFold(*disk.ManagedDisk.ID, diskURI)) {
// found the disk
klog.V(2).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI)
disks = append(disks[:i], disks[i+1:]...)
disks[i].ToBeDetached = to.BoolPtr(true)
bFoundDisk = true
break
}
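Instead of filtering out disks that are already detaching and removing the target entry from the slice, the updated code copies the VM's data-disk list and marks the target disk with ToBeDetached, letting Azure perform the removal; the same change is applied to the scale-set path below. A small sketch of the new detach marking (the compute API version in the import path is an assumption; use whichever package is vendored in go.mod):

package azuresketch

import (
	"strings"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

// markDiskForDetach flags the matching data disk as ToBeDetached in place,
// rather than removing it from the slice, mirroring the change above.
func markDiskForDetach(disks []compute.DataDisk, diskName, diskURI string) bool {
	for i, disk := range disks {
		if (disk.Name != nil && diskName != "" && strings.EqualFold(*disk.Name, diskName)) ||
			(disk.ManagedDisk != nil && diskURI != "" && strings.EqualFold(*disk.ManagedDisk.ID, diskURI)) {
			disks[i].ToBeDetached = to.BoolPtr(true)
			return true
		}
	}
	return false
}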

View File

@ -46,7 +46,8 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod
disks := []compute.DataDisk{}
if vm.StorageProfile != nil && vm.StorageProfile.DataDisks != nil {
disks = filterDetachingDisks(*vm.StorageProfile.DataDisks)
disks = make([]compute.DataDisk, len(*vm.StorageProfile.DataDisks))
copy(disks, *vm.StorageProfile.DataDisks)
}
if isManagedDisk {
managedDisk := &compute.ManagedDiskParameters{ID: &diskURI}
@ -136,7 +137,8 @@ func (ss *scaleSet) DetachDisk(diskName, diskURI string, nodeName types.NodeName
disks := []compute.DataDisk{}
if vm.StorageProfile != nil && vm.StorageProfile.DataDisks != nil {
disks = filterDetachingDisks(*vm.StorageProfile.DataDisks)
disks = make([]compute.DataDisk, len(*vm.StorageProfile.DataDisks))
copy(disks, *vm.StorageProfile.DataDisks)
}
bFoundDisk := false
for i, disk := range disks {
@ -145,7 +147,7 @@ func (ss *scaleSet) DetachDisk(diskName, diskURI string, nodeName types.NodeName
(disk.ManagedDisk != nil && diskURI != "" && strings.EqualFold(*disk.ManagedDisk.ID, diskURI)) {
// found the disk
klog.V(2).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI)
disks = append(disks[:i], disks[i+1:]...)
disks[i].ToBeDetached = to.BoolPtr(true)
bFoundDisk = true
break
}

View File

@ -21,7 +21,7 @@ package azure
import (
"context"
"fmt"
"strconv"
"os"
"strings"
v1 "k8s.io/api/core/v1"
@ -35,6 +35,10 @@ const (
vmPowerStatePrefix = "PowerState/"
vmPowerStateStopped = "stopped"
vmPowerStateDeallocated = "deallocated"
// nodeNameEnvironmentName is the environment variable name for getting node name.
// It is only used for out-of-tree cloud provider.
nodeNameEnvironmentName = "NODE_NAME"
)
var (
@ -231,17 +235,20 @@ func (az *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID st
}
func (az *Cloud) isCurrentInstance(name types.NodeName, metadataVMName string) (bool, error) {
var err error
nodeName := mapNodeNameToVMName(name)
// VMSS vmName is not the same as the hostname; use the hostname instead.
if az.VMType == vmTypeVMSS {
// VMSS vmName is not same with hostname, construct the node name "{computer-name-prefix}{base-36-instance-id}".
// Refer https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-instance-ids#scale-set-vm-computer-name.
if ssName, instanceID, err := extractVmssVMName(metadataVMName); err == nil {
instance, err := strconv.ParseInt(instanceID, 10, 64)
if err != nil {
return false, fmt.Errorf("failed to parse VMSS instanceID %q: %v", instanceID, err)
}
metadataVMName = fmt.Sprintf("%s%06s", ssName, strconv.FormatInt(instance, 36))
metadataVMName, err = os.Hostname()
if err != nil {
return false, err
}
// Use name from env variable "NODE_NAME" if it is set.
nodeNameEnv := os.Getenv(nodeNameEnvironmentName)
if nodeNameEnv != "" {
metadataVMName = nodeNameEnv
}
}
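The VMSS branch no longer reconstructs the computer name from the base-36 instance ID; it compares against the OS hostname, optionally overridden by the NODE_NAME environment variable for the out-of-tree cloud provider. A trivial sketch of that resolution order (the function name is illustrative):

package azuresketch

import "os"

// resolveVMSSNodeName uses the OS hostname and lets NODE_NAME, when set,
// take precedence, matching the comparison logic added above.
func resolveVMSSNodeName() (string, error) {
	name, err := os.Hostname()
	if err != nil {
		return "", err
	}
	if env := os.Getenv("NODE_NAME"); env != "" {
		name = env
	}
	return name, nil
}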

View File

@ -102,6 +102,8 @@ const (
serviceTagKey = "service"
// clusterNameKey is the cluster name key applied for public IP tags.
clusterNameKey = "kubernetes-cluster-name"
defaultLoadBalancerSourceRanges = "0.0.0.0/0"
)
// GetLoadBalancer returns whether the specified load balancer and its components exist, and
@ -1132,6 +1134,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
if lbIP != nil {
destinationIPAddress = *lbIP
}
if destinationIPAddress == "" {
destinationIPAddress = "*"
}
@ -1141,6 +1144,12 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
return nil, err
}
serviceTags := getServiceTags(service)
if len(serviceTags) != 0 {
if _, ok := sourceRanges[defaultLoadBalancerSourceRanges]; ok {
delete(sourceRanges, defaultLoadBalancerSourceRanges)
}
}
var sourceAddressPrefixes []string
if (sourceRanges == nil || servicehelpers.IsAllowAll(sourceRanges)) && len(serviceTags) == 0 {
if !requiresInternalLoadBalancer(service) {
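When the service specifies Azure service tags, the implicit allow-all source range ("0.0.0.0/0", the new defaultLoadBalancerSourceRanges constant) is dropped so the generated security rules are scoped to those tags rather than to all sources. A minimal sketch of that pruning (the signature is illustrative; the real code operates on the parsed source-range set for the service):

package azuresketch

import "net"

// pruneDefaultSourceRange drops the implicit 0.0.0.0/0 entry whenever service
// tags are present, so the NSG rules are keyed to the tags instead.
func pruneDefaultSourceRange(sourceRanges map[string]*net.IPNet, serviceTags []string) {
	if len(serviceTags) != 0 {
		delete(sourceRanges, "0.0.0.0/0")
	}
}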

View File

@ -336,6 +336,13 @@ func (c *Cloud) GetAzureDiskLabels(diskURI string) (map[string]string, error) {
return nil, err
}
labels := map[string]string{
v1.LabelZoneRegion: c.Location,
}
// no azure credential is set, return the region-only labels
if c.DisksClient == nil {
return labels, nil
}
// Get information of the disk.
ctx, cancel := getContextWithCancel()
defer cancel()
@ -348,7 +355,7 @@ func (c *Cloud) GetAzureDiskLabels(diskURI string) (map[string]string, error) {
// Check whether availability zone is specified.
if disk.Zones == nil || len(*disk.Zones) == 0 {
klog.V(4).Infof("Azure disk %q is not zoned", diskName)
return nil, nil
return labels, nil
}
zones := *disk.Zones
@ -359,9 +366,6 @@ func (c *Cloud) GetAzureDiskLabels(diskURI string) (map[string]string, error) {
zone := c.makeZone(c.Location, zoneID)
klog.V(4).Infof("Got zone %q for Azure disk %q", zone, diskName)
labels := map[string]string{
v1.LabelZoneRegion: c.Location,
v1.LabelZoneFailureDomain: zone,
}
labels[v1.LabelZoneFailureDomain] = zone
return labels, nil
}
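GetAzureDiskLabels now always starts from a region-only label map, returning it unchanged when no Azure credential is configured or when the disk is not zoned, and only adds the failure-domain label once a zone is found. A minimal sketch of the resulting label shape (the helper name is illustrative):

package azuresketch

import v1 "k8s.io/api/core/v1"

// diskTopologyLabels always carries the region label and adds the zone label
// only when a zone is known, matching the behavior above.
func diskTopologyLabels(location, zone string) map[string]string {
	labels := map[string]string{
		v1.LabelZoneRegion: location,
	}
	if zone != "" {
		labels[v1.LabelZoneFailureDomain] = zone
	}
	return labels
}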

42
vendor/modules.txt vendored
View File

@ -1212,7 +1212,7 @@ gopkg.in/square/go-jose.v2/jwt
gopkg.in/warnings.v0
# gopkg.in/yaml.v2 v2.3.0
gopkg.in/yaml.v2
# k8s.io/api v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.4-k3s1
# k8s.io/api v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.6-k3s1
k8s.io/api/admission/v1
k8s.io/api/admission/v1beta1
k8s.io/api/admissionregistration/v1
@ -1256,7 +1256,7 @@ k8s.io/api/settings/v1alpha1
k8s.io/api/storage/v1
k8s.io/api/storage/v1alpha1
k8s.io/api/storage/v1beta1
# k8s.io/apiextensions-apiserver v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.4-k3s1
# k8s.io/apiextensions-apiserver v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.6-k3s1
k8s.io/apiextensions-apiserver/pkg/apihelpers
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install
@ -1296,7 +1296,7 @@ k8s.io/apiextensions-apiserver/pkg/generated/openapi
k8s.io/apiextensions-apiserver/pkg/registry/customresource
k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor
k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition
# k8s.io/apimachinery v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.4-k3s1
# k8s.io/apimachinery v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.6-k3s1
k8s.io/apimachinery/pkg/api/equality
k8s.io/apimachinery/pkg/api/errors
k8s.io/apimachinery/pkg/api/meta
@ -1358,7 +1358,7 @@ k8s.io/apimachinery/pkg/watch
k8s.io/apimachinery/third_party/forked/golang/json
k8s.io/apimachinery/third_party/forked/golang/netutil
k8s.io/apimachinery/third_party/forked/golang/reflect
# k8s.io/apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.4-k3s1
# k8s.io/apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.6-k3s1
k8s.io/apiserver/pkg/admission
k8s.io/apiserver/pkg/admission/configuration
k8s.io/apiserver/pkg/admission/initializer
@ -1488,7 +1488,7 @@ k8s.io/apiserver/plugin/pkg/authenticator/request/basicauth
k8s.io/apiserver/plugin/pkg/authenticator/token/oidc
k8s.io/apiserver/plugin/pkg/authenticator/token/webhook
k8s.io/apiserver/plugin/pkg/authorizer/webhook
# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.4-k3s1
# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.6-k3s1
k8s.io/cli-runtime/pkg/genericclioptions
k8s.io/cli-runtime/pkg/kustomize
k8s.io/cli-runtime/pkg/kustomize/k8sdeps
@ -1501,7 +1501,7 @@ k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch
k8s.io/cli-runtime/pkg/kustomize/k8sdeps/validator
k8s.io/cli-runtime/pkg/printers
k8s.io/cli-runtime/pkg/resource
# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.4-k3s1
# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.6-k3s1
k8s.io/client-go/discovery
k8s.io/client-go/discovery/cached
k8s.io/client-go/discovery/cached/disk
@ -1735,7 +1735,7 @@ k8s.io/client-go/util/jsonpath
k8s.io/client-go/util/keyutil
k8s.io/client-go/util/retry
k8s.io/client-go/util/workqueue
# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.4-k3s1
# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.6-k3s1
k8s.io/cloud-provider
k8s.io/cloud-provider/api
k8s.io/cloud-provider/node/helpers
@ -1743,13 +1743,13 @@ k8s.io/cloud-provider/service/helpers
k8s.io/cloud-provider/volume
k8s.io/cloud-provider/volume/errors
k8s.io/cloud-provider/volume/helpers
# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.4-k3s1
# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.6-k3s1
k8s.io/cluster-bootstrap/token/api
k8s.io/cluster-bootstrap/token/jws
k8s.io/cluster-bootstrap/token/util
k8s.io/cluster-bootstrap/util/secrets
k8s.io/cluster-bootstrap/util/tokens
# k8s.io/code-generator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.4-k3s1
# k8s.io/code-generator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.6-k3s1
k8s.io/code-generator/cmd/client-gen/args
k8s.io/code-generator/cmd/client-gen/generators
k8s.io/code-generator/cmd/client-gen/generators/fake
@ -1764,7 +1764,7 @@ k8s.io/code-generator/cmd/lister-gen/args
k8s.io/code-generator/cmd/lister-gen/generators
k8s.io/code-generator/pkg/namer
k8s.io/code-generator/pkg/util
# k8s.io/component-base v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.4-k3s1
# k8s.io/component-base v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.6-k3s1
k8s.io/component-base/cli/flag
k8s.io/component-base/cli/globalflag
k8s.io/component-base/codec
@ -1782,10 +1782,10 @@ k8s.io/component-base/metrics/prometheus/workqueue
k8s.io/component-base/metrics/testutil
k8s.io/component-base/version
k8s.io/component-base/version/verflag
# k8s.io/cri-api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.4-k3s1
# k8s.io/cri-api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.6-k3s1
k8s.io/cri-api/pkg/apis
k8s.io/cri-api/pkg/apis/runtime/v1alpha2
# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.4-k3s1
# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.6-k3s1
k8s.io/csi-translation-lib
k8s.io/csi-translation-lib/plugins
# k8s.io/gengo v0.0.0-20200114144118-36b2048a9120
@ -1800,7 +1800,7 @@ k8s.io/gengo/types
k8s.io/heapster/metrics/api/v1/types
# k8s.io/klog v1.0.0
k8s.io/klog
# k8s.io/kube-aggregator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.4-k3s1
# k8s.io/kube-aggregator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.6-k3s1
k8s.io/kube-aggregator/pkg/apis/apiregistration
k8s.io/kube-aggregator/pkg/apis/apiregistration/install
k8s.io/kube-aggregator/pkg/apis/apiregistration/v1
@ -1828,7 +1828,7 @@ k8s.io/kube-aggregator/pkg/controllers/status
k8s.io/kube-aggregator/pkg/registry/apiservice
k8s.io/kube-aggregator/pkg/registry/apiservice/etcd
k8s.io/kube-aggregator/pkg/registry/apiservice/rest
# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.4-k3s1
# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.6-k3s1
k8s.io/kube-controller-manager/config/v1alpha1
# k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6
k8s.io/kube-openapi/pkg/aggregator
@ -1839,14 +1839,14 @@ k8s.io/kube-openapi/pkg/schemaconv
k8s.io/kube-openapi/pkg/util
k8s.io/kube-openapi/pkg/util/proto
k8s.io/kube-openapi/pkg/util/proto/validation
# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.4-k3s1
# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.6-k3s1
k8s.io/kube-proxy/config/v1alpha1
# k8s.io/kube-scheduler v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.4-k3s1
# k8s.io/kube-scheduler v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.6-k3s1
k8s.io/kube-scheduler/config/v1
k8s.io/kube-scheduler/config/v1alpha1
k8s.io/kube-scheduler/config/v1alpha2
k8s.io/kube-scheduler/extender/v1
# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.4-k3s1
# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.6-k3s1
k8s.io/kubectl/pkg/apps
k8s.io/kubectl/pkg/cmd
k8s.io/kubectl/pkg/cmd/annotate
@ -1921,11 +1921,11 @@ k8s.io/kubectl/pkg/util/storage
k8s.io/kubectl/pkg/util/templates
k8s.io/kubectl/pkg/util/term
k8s.io/kubectl/pkg/validation
# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.4-k3s1
# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.6-k3s1
k8s.io/kubelet/config/v1beta1
k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1
k8s.io/kubelet/pkg/apis/pluginregistration/v1
# k8s.io/kubernetes v1.18.0 => github.com/rancher/kubernetes v1.18.4-k3s1
# k8s.io/kubernetes v1.18.0 => github.com/rancher/kubernetes v1.18.6-k3s1
k8s.io/kubernetes/cmd/cloud-controller-manager/app
k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config
k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config/scheme
@ -2669,7 +2669,7 @@ k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear
k8s.io/kubernetes/third_party/forked/gonum/graph/simple
k8s.io/kubernetes/third_party/forked/gonum/graph/traverse
k8s.io/kubernetes/third_party/forked/ipvs
# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.4-k3s1
# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.6-k3s1
k8s.io/legacy-cloud-providers/aws
k8s.io/legacy-cloud-providers/azure
k8s.io/legacy-cloud-providers/azure/auth
@ -2700,7 +2700,7 @@ k8s.io/legacy-cloud-providers/openstack
k8s.io/legacy-cloud-providers/vsphere
k8s.io/legacy-cloud-providers/vsphere/vclib
k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers
# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.4-k3s1
# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.6-k3s1
k8s.io/metrics/pkg/apis/custom_metrics
k8s.io/metrics/pkg/apis/custom_metrics/v1beta1
k8s.io/metrics/pkg/apis/custom_metrics/v1beta2