From f1a01f4afb42d51ade6ba4c9cfe1412c4fbda58c Mon Sep 17 00:00:00 2001 From: Erik Wilson Date: Tue, 26 May 2020 15:59:35 -0700 Subject: [PATCH] Update k8s 1.18.3 --- go.mod | 50 +++---- go.sum | 92 +++++++------ .../k8s.io/apimachinery/pkg/util/json/json.go | 25 ++++ .../k8s.io/apimachinery/pkg/util/wait/wait.go | 27 +++- vendor/k8s.io/apiserver/pkg/server/config.go | 1 + vendor/k8s.io/client-go/pkg/version/base.go | 6 +- .../tools/clientcmd/client_config.go | 11 +- .../resourcelock/configmaplock.go | 3 + vendor/k8s.io/cloud-provider/go.sum | 4 +- vendor/k8s.io/component-base/version/base.go | 6 +- .../csi-translation-lib/CONTRIBUTING.md | 2 +- vendor/k8s.io/csi-translation-lib/go.mod | 2 + vendor/k8s.io/csi-translation-lib/go.sum | 2 +- .../k8s.io/csi-translation-lib/plugins/BUILD | 3 + .../csi-translation-lib/plugins/azure_disk.go | 24 ++-- .../csi-translation-lib/plugins/azure_file.go | 122 ++++++++++++------ .../pkg/util/proto/validation/types.go | 2 +- .../pkg/controller/cloud/node_controller.go | 40 +++--- .../cache/actual_state_of_world.go | 2 + .../operationexecutor/operation_executor.go | 9 +- .../operationexecutor/operation_generator.go | 34 ++--- .../pluginmanager/reconciler/reconciler.go | 2 +- .../kubernetes/pkg/scheduler/eventhandlers.go | 3 + .../noderesources/resource_allocation.go | 10 ++ .../pkg/scheduler/nodeinfo/node_info.go | 20 ++- .../kubernetes/pkg/scheduler/scheduler.go | 3 + .../kubernetes/pkg/scheduler/util/BUILD | 2 + .../kubernetes/pkg/scheduler/util/non_zero.go | 7 + vendor/k8s.io/kubernetes/pkg/util/ipvs/BUILD | 8 +- .../kubernetes/pkg/util/ipvs/ipvs_linux.go | 2 +- .../kubernetes/pkg/volume/csi/csi_plugin.go | 55 ++++++-- .../csi/nodeinfomanager/nodeinfomanager.go | 8 +- .../kubernetes/third_party/forked/ipvs/BUILD | 49 +++++++ .../third_party/forked/ipvs}/LICENSE | 0 .../third_party/forked}/ipvs/constants.go | 14 ++ .../third_party/forked}/ipvs/ipvs.go | 3 + .../third_party/forked}/ipvs/netlink.go | 87 +++++++++++-- .../azure/azure_controller_common.go | 17 ++- .../azure/azure_loadbalancer.go | 2 +- .../azure/azure_standard.go | 12 +- .../azure/azure_vmss.go | 27 +++- vendor/modules.txt | 47 ++++--- 42 files changed, 580 insertions(+), 265 deletions(-) create mode 100644 vendor/k8s.io/kubernetes/third_party/forked/ipvs/BUILD rename vendor/{github.com/docker/libnetwork => k8s.io/kubernetes/third_party/forked/ipvs}/LICENSE (100%) rename vendor/{github.com/docker/libnetwork => k8s.io/kubernetes/third_party/forked}/ipvs/constants.go (85%) rename vendor/{github.com/docker/libnetwork => k8s.io/kubernetes/third_party/forked}/ipvs/ipvs.go (97%) rename vendor/{github.com/docker/libnetwork => k8s.io/kubernetes/third_party/forked}/ipvs/netlink.go (89%) diff --git a/go.mod b/go.mod index db2ad3d1d2..14e140908a 100644 --- a/go.mod +++ b/go.mod @@ -32,31 +32,31 @@ replace ( github.com/prometheus/client_model => github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 github.com/prometheus/common => github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 github.com/prometheus/procfs => github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a - k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.2-k3s.1 - k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.2-k3s.1 - k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.2-k3s.1 - k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.2-k3s.1 - 
k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.2-k3s.1 - k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.2-k3s.1 - k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.2-k3s.1 - k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.2-k3s.1 - k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.2-k3s.1 - k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.2-k3s.1 - k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.2-k3s.1 - k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.2-k3s.1 - k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.2-k3s.1 - k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.2-k3s.1 - k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.2-k3s.1 - k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.2-k3s.1 - k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.2-k3s.1 - k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.2-k3s.1 - k8s.io/kubernetes => github.com/rancher/kubernetes v1.18.2-k3s.1 - k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.2-k3s.1 - k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.2-k3s.1 - k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.18.2-k3s.1 - k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.2-k3s.1 - k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.18.2-k3s.1 - k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.18.2-k3s.1 + k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.3-k3s1 + k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.3-k3s1 + k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.3-k3s1 + k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.3-k3s1 + k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.3-k3s1 + k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.3-k3s1 + k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.3-k3s1 + k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.3-k3s1 + k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.3-k3s1 + k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.3-k3s1 + k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.3-k3s1 + k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.3-k3s1 + k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.3-k3s1 + k8s.io/kube-controller-manager => 
github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.3-k3s1 + k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.3-k3s1 + k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.3-k3s1 + k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.3-k3s1 + k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.3-k3s1 + k8s.io/kubernetes => github.com/rancher/kubernetes v1.18.3-k3s1 + k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.3-k3s1 + k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.3-k3s1 + k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.18.3-k3s1 + k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.3-k3s1 + k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.18.3-k3s1 + k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.18.3-k3s1 mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34 ) diff --git a/go.sum b/go.sum index cec4c2b4d4..4c18fa86f0 100644 --- a/go.sum +++ b/go.sum @@ -182,8 +182,6 @@ github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHz github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/libnetwork v0.8.0-dev.2.0.20190624125649-f0e46a78ea34 h1:8GFZB1KesbMy2X2zTiJyAuwCow+U1GT0ueD42p59y4k= -github.com/docker/libnetwork v0.8.0-dev.2.0.20190624125649-f0e46a78ea34/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= @@ -641,49 +639,49 @@ github.com/rancher/helm-controller v0.6.0 h1:nFptBZFWpHga65M6bP04BZGLlzeMgezAXds github.com/rancher/helm-controller v0.6.0/go.mod h1:ZylsxIMGNADRPRNW+NiBWhrwwks9vnKLQiCHYWb6Bi0= github.com/rancher/kine v0.4.0 h1:1IhWy3TzjExG8xnj46eyUEWdzqNAD1WrgL4eEBKm6Uc= github.com/rancher/kine v0.4.0/go.mod h1:IImtCJ68AIkE+VY/kUI0NkyJL5q5WzO8QvMsSXqbrpA= -github.com/rancher/kubernetes v1.18.2-k3s.1 h1:LhWNObWF7dL/+T57LkYpuRKtsCBpt0P5G6dRVFG+Ncs= -github.com/rancher/kubernetes v1.18.2-k3s.1/go.mod h1:z8xjOOO1Ljz+TaHpOxVGC7cxtF32TesIamoQ+BZrVS0= -github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.2-k3s.1 h1:tYDY9g8+xLwUcsG9T6Xg7cBkO/vgU6yv7cQKqUN6NDE= -github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.2-k3s.1/go.mod h1:oMzWB6/RPBLYAObltLVSu5Ms1ZztBe7G8s1ni2rZY7w= -github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.2-k3s.1 h1:mjOCIsG8WfV2LROU8xLVGEZ9G5uWrD/xHNc87rLPss8= -github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.2-k3s.1/go.mod h1:tMuEHO85+WtdJsLBJ1U4bh7oB23v/D4vP0BtL39qxM4= -github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.2-k3s.1 h1:w2RnTwBNOi1QHYFoXbFLXx3Gaw3pPbplxbUPpl76hjw= -github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery 
v1.18.2-k3s.1/go.mod h1:3Y3wDqxL/YFcgzyFD7r80Z6lMxpRJb/V+dds+b7A2NE= -github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.2-k3s.1 h1:/6mV1HGv/QoAjFCPlSZfkdeJagmT8gYwiaOsXxjTZEM= -github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.2-k3s.1/go.mod h1:SeOQ7d1nUCULR978oKuSossKyGzova3DlaXEa1zJ1ns= -github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.2-k3s.1 h1:SlXTYNBxaWbxSPyHNRQ/epxqixUiokY04Wh+8gBYTXA= -github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.2-k3s.1/go.mod h1:M8WtUx89NzNZ4Qx/1diDW/1TSs2Pv9J6//dIYvvtwSs= -github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.2-k3s.1 h1:qCJO8jfGrmJk7Wn8jfqekOua5PizO/joSQUB89vxLB0= -github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.2-k3s.1/go.mod h1:Ck7kQmlFASfY0SaqYH1NwUrxeuAipkIbnuHi642eQ+I= -github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.2-k3s.1 h1:PmV2L98GjPbD+0UvMR//4I8DiEraMOEl7fq65OGd4hI= -github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.2-k3s.1/go.mod h1:jW0IWD1v1cNcp/vvXbVuovmZNSieKSZBdM7VmX1lrVI= -github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.2-k3s.1 h1:xTdvOPd4qckfxaqE0vYTdcVhFlYFN7bcS7xg1bnq9Y4= -github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.2-k3s.1/go.mod h1:oHXhD/NqW/vlYggpTUWbP2x6disww69H1jdsyirbJl8= -github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.2-k3s.1 h1:UMkQrPLLHpAH+jKNtMBIZw1i2wSuNSgxu7G48WLsoi0= -github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.2-k3s.1/go.mod h1:Yai6SRJt/nb3VvQw4jKKZBtXRJF/OrswWmfjyF6FqP0= -github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.2-k3s.1 h1:kZyprzrCOdYg1GdB4OPOu7ie2Zyw9ocO6Fa3iG2BhKc= -github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.2-k3s.1/go.mod h1:zRlCznOsLYdwq5DB2b/26X/n/04fhV3U3rMC60t80/Q= -github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.2-k3s.1 h1:ULoh4AB2JiSHA3ELUD56zRh7cnL6/bU8I6AaBbRI/xo= -github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.2-k3s.1/go.mod h1:O3AtmT8iqccYwp/fsXdy3h0N9X/yfvRMD2XS48PJrBk= -github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.2-k3s.1 h1:1rhSNnADx+2NMudlqoFC1cBjPLblQ0sZeKkiWIUvJZY= -github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.2-k3s.1/go.mod h1:BBW+zYPd9dNyl1qZ3U/coU05IW6AvRAbo3s86WKDfzU= -github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.2-k3s.1 h1:j0DihywFZbNqzlEE9UikIXoynvLumJFZNHcPrYTr63E= -github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.2-k3s.1/go.mod h1:EY3DrCVVj6X1xeVtHF/0lht5TZK9YhKLyfe6QBD3QvE= -github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.2-k3s.1 h1:FEyiGSFRKYejw8aRPbOTfIVukL0DkwhgdfmN36zQrBo= -github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.2-k3s.1/go.mod h1:pABoR/v0r2aJLFC1570FaaRJbXyiHhqdGHe5W8nk0XY= -github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.2-k3s.1 h1:fHJ7O2jLBQhDnw7ahecdtmx1l3mJ25fwWtlq3cOPrxw= -github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.2-k3s.1/go.mod h1:GLAmLACy/nOND24DRGKyPH21F89pTcevjPRxEtbLJmU= -github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.2-k3s.1 h1:7ZGcqlwBSyLMLVT9r7F9jHRc+dhnlumrHYfblZilkl4= -github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.2-k3s.1/go.mod h1:UNQ/Ff/Mq9mmCl0MYGl3ciCEIRQr9BT+/DSsoy6/ZMI= -github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.2-k3s.1 
h1:oNlYS9R/bsVnlUIeXrbJAxvNPlqhqksJZAoA4eneAdc= -github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.2-k3s.1/go.mod h1:lLLodYzjtaBEMebmdtYz2Sh+X8/3DB79imNypXbONsE= -github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.2-k3s.1 h1:Fl7NvvCzMDsZWYIh2F3MzQB+EPl7Xh0TTFTAw6SZNbo= -github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.2-k3s.1/go.mod h1:Raj75cxSm9NiVBoLk/lB1D4XvpBzTG4WoJ6nIH8Cyew= -github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.2-k3s.1 h1:nldhxCsspFtJPzus/aeLcednyDvAesVgu/XIE5Qa6/8= -github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.2-k3s.1/go.mod h1:R6lK1g14jiec20OVuA1ArvsCKs5th4rxGL3eUMdQmyA= -github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.2-k3s.1 h1:ha8xCCbv3iPsXg+TjB+ZHHxxRyuiWWB9bgTDkgHmLCk= -github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.2-k3s.1/go.mod h1:xZM9EdJpWjqIWPvLiCP7vYKUEMwIgc0S8nc/MlLVK3Y= -github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.2-k3s.1/go.mod h1:gpiIUEAyQvSEXKbsH2taOEzkrHXvoZwHuArWgR+DpG8= +github.com/rancher/kubernetes v1.18.3-k3s1 h1:QYh2MY+odOzBQedwClFdX1tRtYQADaFRWi+etbBJvuU= +github.com/rancher/kubernetes v1.18.3-k3s1/go.mod h1:Efg82S+Ti02A/Mww53bxroc7IgzX2bgPsf6hT8gAs3M= +github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.3-k3s1 h1:wNUROW7IOAbW0pCNtdKvDWIRUKE5pk3jffttSMyGn8s= +github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.3-k3s1/go.mod h1:oMzWB6/RPBLYAObltLVSu5Ms1ZztBe7G8s1ni2rZY7w= +github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.3-k3s1 h1:arrGaUaK4WJ/x0TMSw7wxMDlK+akOfq2Yk4MjMdnkqQ= +github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.3-k3s1/go.mod h1:BVIYewlEVCukQBRrZR3Kms8GdCsDQBsRIBCoy3rwzMk= +github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.3-k3s1 h1:3rwFiKyQ7FL2hdqQNdJnP0BaM0a8L1tmGf+h1Nrqeno= +github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.3-k3s1/go.mod h1:0LbhSvBf6oDO/G0IsPYTC3eGykX9kRjGqE1+90am7Pg= +github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.3-k3s1 h1:Ai2duIKWpX9IxW/sQL1ojbAX9KcJ9TPLQsogR9vafJA= +github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.3-k3s1/go.mod h1:wYoVKxMBc/Gtl3o5eEhoIy1iS0Zw8kLYIak9mud65gg= +github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.3-k3s1 h1:TQrvULLk+ESOptqV09QK6zzPu/IV7kJSxLTa9c5OTbE= +github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.3-k3s1/go.mod h1:e0a+/gPy7PnNaRJHZz5E3lqfMsiJ17sSfvktHyipb3I= +github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.3-k3s1 h1:P3kIcwlm5w/XW8HgpyOYxOm70ZfZEtZm3xpHuOnlx6M= +github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.3-k3s1/go.mod h1:Ck7kQmlFASfY0SaqYH1NwUrxeuAipkIbnuHi642eQ+I= +github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.3-k3s1 h1:czS0txmHj7i7dRqppu6ekwFigMsZUHMMmaSySuRcQSE= +github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.3-k3s1/go.mod h1:jW0IWD1v1cNcp/vvXbVuovmZNSieKSZBdM7VmX1lrVI= +github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.3-k3s1 h1:wjE5mqAyInJ33cx0St7jYWdy97O5hRBlxysQCH7kvU4= +github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.3-k3s1/go.mod h1:oHXhD/NqW/vlYggpTUWbP2x6disww69H1jdsyirbJl8= +github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.3-k3s1 h1:SBCvaudegFLUmSl0rfariVQjGhsH/A0AV2rq8ys3IU4= +github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.3-k3s1/go.mod 
h1:qBtAbyavqI3lGwEvxrQk9wwUTWntOADx38Iizyn31nw= +github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.3-k3s1 h1:/47POpHTRsfFNc0k2Ruq67vqOtPms5FA9TXo9ci6FZc= +github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.3-k3s1/go.mod h1:zRlCznOsLYdwq5DB2b/26X/n/04fhV3U3rMC60t80/Q= +github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.3-k3s1 h1:/VofAOz4+KX9zda4+M8WnE5eDn82ezYyBz7HuzUoBq0= +github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.3-k3s1/go.mod h1:O3AtmT8iqccYwp/fsXdy3h0N9X/yfvRMD2XS48PJrBk= +github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.3-k3s1 h1:+CsRXq96B0ThQuI0x0i975CBcRKYfrUlR4/s+h3vYxU= +github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.3-k3s1/go.mod h1:/YQL/PqGdoNbC2H+w4tx2zrVdxNb541lW3PA81FdOlE= +github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.3-k3s1 h1:qDmjyk7BoZO7TcGpU6YKOZVPn84628tC8s0r8Xz/6M0= +github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.3-k3s1/go.mod h1:NcOKzNVVRhmkQmzCcBHfPPcZqgGXouc/o3Eul3saPj8= +github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.3-k3s1 h1:Y9ySTyuhR84dJaYzwBHmKxBtHQ2uWIoP9VL4iYCvwUM= +github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.3-k3s1/go.mod h1:pABoR/v0r2aJLFC1570FaaRJbXyiHhqdGHe5W8nk0XY= +github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.3-k3s1 h1:K1sU4yqMdWaCGj4jEusK+PFowCCUuhTiH3jUriQD5WU= +github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.3-k3s1/go.mod h1:GLAmLACy/nOND24DRGKyPH21F89pTcevjPRxEtbLJmU= +github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.3-k3s1 h1:NqvNsqKpXJ7ZAFLOmxj6gvRktx0Oga9DH0M979za6uI= +github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.3-k3s1/go.mod h1:UNQ/Ff/Mq9mmCl0MYGl3ciCEIRQr9BT+/DSsoy6/ZMI= +github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.3-k3s1 h1:j4l5XcOKNqDtqIMrdDJLQtO+4LAcaojGa/etG6lv354= +github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.3-k3s1/go.mod h1:eosbAJR16uuWsgirnmlt31NV+ZwZLQsMNbxiRZYbco8= +github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.3-k3s1 h1:qqKfrPB2ghGqf/ElQrVmZaVvm+/DSupWAApEe4Zk5Uk= +github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.3-k3s1/go.mod h1:Raj75cxSm9NiVBoLk/lB1D4XvpBzTG4WoJ6nIH8Cyew= +github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.3-k3s1 h1:xb3ZtwF3emE38qi8XWjEw+ES4WKe3k4B4Sr8YGFWEHo= +github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.3-k3s1/go.mod h1:R6lK1g14jiec20OVuA1ArvsCKs5th4rxGL3eUMdQmyA= +github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.3-k3s1 h1:r7qvKjbV7XHI3W9a8Jhzsiujrp7d76USez5i1LZNfYc= +github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.3-k3s1/go.mod h1:xZM9EdJpWjqIWPvLiCP7vYKUEMwIgc0S8nc/MlLVK3Y= +github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.18.3-k3s1/go.mod h1:p8OmVbdzpawdZ/r9E1qcdJpzRirEg4OcSg8aZVWqvJo= github.com/rancher/moq v0.0.0-20190404221404-ee5226d43009/go.mod h1:wpITyDPTi/Na5h73XkbuEf2AP9fbgrIGqqxVzFhYD6U= github.com/rancher/remotedialer v0.2.0 h1:xD7t3K6JYwTdAsxmGtTHQMkEkFgKouQ1foLxVW424Dc= github.com/rancher/remotedialer v0.2.0/go.mod h1:tkU8ZvrR5lRgaKWaX71nAy6daeqvPFx/lJEnbW7tXSI= @@ -1067,8 +1065,8 @@ k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= 
k8s.io/kube-openapi v0.0.0-20190502190224-411b2483e503/go.mod h1:iU+ZGYsNlvU9XKUSso6SQfKTCCw7lFduMZy26Mgr2Fw= -k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c h1:/KUFqjjqAcY4Us6luF5RDNZ16KJtb49HfR3ZHB9qYXM= -k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= +k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 h1:Oh3Mzx5pJ+yIumsAD0MOECPVeXsVot0UkiaCGVyfGQY= +k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/repo-infra v0.0.1-alpha.1/go.mod h1:wO1t9WaB99V80ljbeENTnayuEEwNZt7gECYh/CEyOJ8= k8s.io/system-validators v1.0.4/go.mod h1:HgSgTg4NAGNoYYjKsUyk52gdNi2PVDswQ9Iyn66R7NI= k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= diff --git a/vendor/k8s.io/apimachinery/pkg/util/json/json.go b/vendor/k8s.io/apimachinery/pkg/util/json/json.go index 0e2e301754..204834883f 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/json/json.go +++ b/vendor/k8s.io/apimachinery/pkg/util/json/json.go @@ -66,11 +66,36 @@ func Unmarshal(data []byte, v interface{}) error { // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 return convertSliceNumbers(*v, 0) + case *interface{}: + // Build a decoder from the given data + decoder := json.NewDecoder(bytes.NewBuffer(data)) + // Preserve numbers, rather than casting to float64 automatically + decoder.UseNumber() + // Run the decode + if err := decoder.Decode(v); err != nil { + return err + } + // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 + return convertInterfaceNumbers(v, 0) + default: return json.Unmarshal(data, v) } } +func convertInterfaceNumbers(v *interface{}, depth int) error { + var err error + switch v2 := (*v).(type) { + case json.Number: + *v, err = convertNumber(v2) + case map[string]interface{}: + err = convertMapNumbers(v2, depth+1) + case []interface{}: + err = convertSliceNumbers(v2, depth+1) + } + return err +} + // convertMapNumbers traverses the map, converting any json.Number values to int64 or float64. // values which are map[string]interface{} or []interface{} are recursively visited func convertMapNumbers(m map[string]interface{}, depth int) error { diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go index 4cb0c122c0..d759d912be 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go @@ -286,8 +286,9 @@ func contextForChannel(parentCh <-chan struct{}) (context.Context, context.Cance } // BackoffManager manages backoff with a particular scheme based on its underlying implementation. It provides -// an interface to return a timer for backoff, and caller shall backoff until Timer.C returns. If the second Backoff() -// is called before the timer from the first Backoff() call finishes, the first timer will NOT be drained. +// an interface to return a timer for backoff, and caller shall backoff until Timer.C() drains. If the second Backoff() +// is called before the timer from the first Backoff() call finishes, the first timer will NOT be drained and result in +// undetermined behavior. // The BackoffManager is supposed to be called in a single-threaded environment. 
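As context for the drain requirement just described, here is a minimal usage sketch (not part of the patch; the durations are hypothetical, the constructor signature is the one from this file) showing a caller draining the returned timer before asking for the next backoff:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/clock"
	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// args: initial backoff, cap, reset-after-idle, factor, jitter, clock
	bm := wait.NewExponentialBackoffManager(100*time.Millisecond, 5*time.Second, time.Minute, 2.0, 0.1, clock.RealClock{})
	for attempt := 0; attempt < 3; attempt++ {
		timer := bm.Backoff()
		<-timer.C() // drain before the next Backoff() call, per the contract above
		fmt.Println("retrying, attempt", attempt)
	}
}
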
type BackoffManager interface { Backoff() clock.Timer @@ -317,7 +318,7 @@ func NewExponentialBackoffManager(initBackoff, maxBackoff, resetDuration time.Du Steps: math.MaxInt32, Cap: maxBackoff, }, - backoffTimer: c.NewTimer(0), + backoffTimer: nil, initialBackoff: initBackoff, lastBackoffStart: c.Now(), backoffResetDuration: resetDuration, @@ -334,9 +335,14 @@ func (b *exponentialBackoffManagerImpl) getNextBackoff() time.Duration { return b.backoff.Step() } -// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for backoff. +// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for exponential backoff. +// The returned timer must be drained before calling Backoff() the second time func (b *exponentialBackoffManagerImpl) Backoff() clock.Timer { - b.backoffTimer.Reset(b.getNextBackoff()) + if b.backoffTimer == nil { + b.backoffTimer = b.clock.NewTimer(b.getNextBackoff()) + } else { + b.backoffTimer.Reset(b.getNextBackoff()) + } return b.backoffTimer } @@ -354,7 +360,7 @@ func NewJitteredBackoffManager(duration time.Duration, jitter float64, c clock.C clock: c, duration: duration, jitter: jitter, - backoffTimer: c.NewTimer(0), + backoffTimer: nil, } } @@ -366,8 +372,15 @@ func (j *jitteredBackoffManagerImpl) getNextBackoff() time.Duration { return jitteredPeriod } +// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for jittered backoff. +// The returned timer must be drained before calling Backoff() the second time func (j *jitteredBackoffManagerImpl) Backoff() clock.Timer { - j.backoffTimer.Reset(j.getNextBackoff()) + backoff := j.getNextBackoff() + if j.backoffTimer == nil { + j.backoffTimer = j.clock.NewTimer(backoff) + } else { + j.backoffTimer.Reset(backoff) + } return j.backoffTimer } diff --git a/vendor/k8s.io/apiserver/pkg/server/config.go b/vendor/k8s.io/apiserver/pkg/server/config.go index 6268f124ec..1b7aa0d655 100644 --- a/vendor/k8s.io/apiserver/pkg/server/config.go +++ b/vendor/k8s.io/apiserver/pkg/server/config.go @@ -682,6 +682,7 @@ func DefaultBuildHandlerChain(apiHandler http.Handler, c *Config) http.Handler { if c.SecureServing != nil && !c.SecureServing.DisableHTTP2 && c.GoawayChance > 0 { handler = genericfilters.WithProbabilisticGoaway(handler, c.GoawayChance) } + handler = genericapifilters.WithCacheControl(handler) handler = genericfilters.WithPanicRecovery(handler) return handler } diff --git a/vendor/k8s.io/client-go/pkg/version/base.go b/vendor/k8s.io/client-go/pkg/version/base.go index b91378ff10..4f0f8daa44 100644 --- a/vendor/k8s.io/client-go/pkg/version/base.go +++ b/vendor/k8s.io/client-go/pkg/version/base.go @@ -3,8 +3,8 @@ package version var ( gitMajor = "1" gitMinor = "18" - gitVersion = "v1.18.2-k3s.1" - gitCommit = "3d7d34a23ec464c08b81486aeca0b7d1bb6e044c" + gitVersion = "v1.18.3-k3s1" + gitCommit = "e3eaf6c5726840b2e31ea2b0a2ea1c2d40ffed2a" gitTreeState = "clean" - buildDate = "2020-04-19T05:33:19Z" + buildDate = "2020-05-26T21:45:32Z" ) diff --git a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go index 5096f51d2f..a9806384aa 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go @@ -35,7 +35,7 @@ import ( var ( // ClusterDefaults has the same behavior as the old EnvVar and DefaultCluster fields // DEPRECATED will be replaced - ClusterDefaults = clientcmdapi.Cluster{Server: 
os.Getenv("KUBERNETES_MASTER")} + ClusterDefaults = clientcmdapi.Cluster{Server: getDefaultServer()} // DefaultClientConfig represents the legacy behavior of this package for defaulting // DEPRECATED will be replace DefaultClientConfig = DirectClientConfig{*clientcmdapi.NewConfig(), "", &ConfigOverrides{ @@ -43,6 +43,15 @@ var ( }, nil, NewDefaultClientConfigLoadingRules(), promptedCredentials{}} ) +// getDefaultServer returns a default setting for DefaultClientConfig +// DEPRECATED +func getDefaultServer() string { + if server := os.Getenv("KUBERNETES_MASTER"); len(server) > 0 { + return server + } + return "http://localhost:8080" +} + // ClientConfig is used to make it easy to get an api server client type ClientConfig interface { // RawConfig returns the merged result of all overrides diff --git a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go index 608f752499..6390b4ef5f 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go @@ -88,6 +88,9 @@ func (cml *ConfigMapLock) Update(ctx context.Context, ler LeaderElectionRecord) if err != nil { return err } + if cml.cm.Annotations == nil { + cml.cm.Annotations = make(map[string]string) + } cml.cm.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes) cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Update(ctx, cml.cm, metav1.UpdateOptions{}) return err diff --git a/vendor/k8s.io/cloud-provider/go.sum b/vendor/k8s.io/cloud-provider/go.sum index 2de744b4d0..2594ffc362 100644 --- a/vendor/k8s.io/cloud-provider/go.sum +++ b/vendor/k8s.io/cloud-provider/go.sum @@ -172,8 +172,8 @@ k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUc k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c h1:/KUFqjjqAcY4Us6luF5RDNZ16KJtb49HfR3ZHB9qYXM= -k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= +k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 h1:Oh3Mzx5pJ+yIumsAD0MOECPVeXsVot0UkiaCGVyfGQY= +k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= diff --git a/vendor/k8s.io/component-base/version/base.go b/vendor/k8s.io/component-base/version/base.go index b91378ff10..4f0f8daa44 100644 --- a/vendor/k8s.io/component-base/version/base.go +++ b/vendor/k8s.io/component-base/version/base.go @@ -3,8 +3,8 @@ package version var ( gitMajor = "1" gitMinor = "18" - gitVersion = "v1.18.2-k3s.1" - gitCommit = "3d7d34a23ec464c08b81486aeca0b7d1bb6e044c" + gitVersion = "v1.18.3-k3s1" + gitCommit = "e3eaf6c5726840b2e31ea2b0a2ea1c2d40ffed2a" gitTreeState = "clean" - buildDate = "2020-04-19T05:33:19Z" + buildDate = "2020-05-26T21:45:32Z" ) diff --git a/vendor/k8s.io/csi-translation-lib/CONTRIBUTING.md b/vendor/k8s.io/csi-translation-lib/CONTRIBUTING.md index 
f82be8de04..ecdb5f9524 100644 --- a/vendor/k8s.io/csi-translation-lib/CONTRIBUTING.md +++ b/vendor/k8s.io/csi-translation-lib/CONTRIBUTING.md @@ -2,6 +2,6 @@ Do not open pull requests directly against this repository, they will be ignored. Instead, please open pull requests against [kubernetes/kubernetes](https://git.k8s.io/kubernetes/). Please follow the same [contributing guide](https://git.k8s.io/kubernetes/CONTRIBUTING.md) you would follow for any other pull request made to kubernetes/kubernetes. -This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/csi-api](https://git.k8s.io/kubernetes/staging/src/k8s.io/csi-api) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot). +This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/csi-translation-lib](https://git.k8s.io/kubernetes/staging/src/k8s.io/csi-translation-lib) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot). Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/sig-architecture/staging.md) for more information. diff --git a/vendor/k8s.io/csi-translation-lib/go.mod b/vendor/k8s.io/csi-translation-lib/go.mod index 1dcdee600e..4018998d91 100644 --- a/vendor/k8s.io/csi-translation-lib/go.mod +++ b/vendor/k8s.io/csi-translation-lib/go.mod @@ -5,9 +5,11 @@ module k8s.io/csi-translation-lib go 1.13 require ( + github.com/stretchr/testify v1.4.0 k8s.io/api v0.0.0 k8s.io/apimachinery v0.0.0 k8s.io/cloud-provider v0.0.0 + k8s.io/klog v1.0.0 ) replace ( diff --git a/vendor/k8s.io/csi-translation-lib/go.sum b/vendor/k8s.io/csi-translation-lib/go.sum index e930655f50..bb6a817cae 100644 --- a/vendor/k8s.io/csi-translation-lib/go.sum +++ b/vendor/k8s.io/csi-translation-lib/go.sum @@ -156,7 +156,7 @@ k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUc k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= +k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E= diff --git a/vendor/k8s.io/csi-translation-lib/plugins/BUILD b/vendor/k8s.io/csi-translation-lib/plugins/BUILD index d3125111b6..3baad53dd7 100644 --- a/vendor/k8s.io/csi-translation-lib/plugins/BUILD +++ b/vendor/k8s.io/csi-translation-lib/plugins/BUILD @@ -19,6 +19,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/cloud-provider/volume:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) @@ -49,5 +50,7 @@ go_test( deps = [ "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/storage/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/github.com/stretchr/testify/assert:go_default_library", ], ) diff --git a/vendor/k8s.io/csi-translation-lib/plugins/azure_disk.go 
b/vendor/k8s.io/csi-translation-lib/plugins/azure_disk.go index c9e8472c85..1ccef2b68a 100644 --- a/vendor/k8s.io/csi-translation-lib/plugins/azure_disk.go +++ b/vendor/k8s.io/csi-translation-lib/plugins/azure_disk.go @@ -110,22 +110,23 @@ func (t *azureDiskCSITranslator) TranslateInTreePVToCSI(pv *v1.PersistentVolume) return nil, fmt.Errorf("pv is nil or Azure Disk source not defined on pv") } - azureSource := pv.Spec.PersistentVolumeSource.AzureDisk + var ( + azureSource = pv.Spec.PersistentVolumeSource.AzureDisk - // refer to https://github.com/kubernetes-sigs/azuredisk-csi-driver/blob/master/docs/driver-parameters.md - csiSource := &v1.CSIPersistentVolumeSource{ - Driver: AzureDiskDriverName, - VolumeHandle: azureSource.DataDiskURI, - ReadOnly: *azureSource.ReadOnly, - FSType: *azureSource.FSType, - VolumeAttributes: map[string]string{azureDiskKind: "Managed"}, - } + // refer to https://github.com/kubernetes-sigs/azuredisk-csi-driver/blob/master/docs/driver-parameters.md + csiSource = &v1.CSIPersistentVolumeSource{ + Driver: AzureDiskDriverName, + VolumeAttributes: map[string]string{azureDiskKind: "Managed"}, + VolumeHandle: azureSource.DataDiskURI, + } + ) if azureSource.CachingMode != nil { csiSource.VolumeAttributes[azureDiskCachingMode] = string(*azureSource.CachingMode) } if azureSource.FSType != nil { + csiSource.FSType = *azureSource.FSType csiSource.VolumeAttributes[azureDiskFSType] = *azureSource.FSType } @@ -133,9 +134,12 @@ func (t *azureDiskCSITranslator) TranslateInTreePVToCSI(pv *v1.PersistentVolume) csiSource.VolumeAttributes[azureDiskKind] = string(*azureSource.Kind) } + if azureSource.ReadOnly != nil { + csiSource.ReadOnly = *azureSource.ReadOnly + } + pv.Spec.PersistentVolumeSource.AzureDisk = nil pv.Spec.PersistentVolumeSource.CSI = csiSource - pv.Spec.AccessModes = backwardCompatibleAccessModes(pv.Spec.AccessModes) return pv, nil } diff --git a/vendor/k8s.io/csi-translation-lib/plugins/azure_file.go b/vendor/k8s.io/csi-translation-lib/plugins/azure_file.go index 7904542145..76a3b86dbb 100644 --- a/vendor/k8s.io/csi-translation-lib/plugins/azure_file.go +++ b/vendor/k8s.io/csi-translation-lib/plugins/azure_file.go @@ -18,11 +18,13 @@ package plugins import ( "fmt" + "regexp" "strings" v1 "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" ) const ( @@ -32,14 +34,19 @@ const ( AzureFileInTreePluginName = "kubernetes.io/azure-file" separator = "#" - volumeIDTemplate = "%s#%s#%s" + volumeIDTemplate = "%s#%s#%s#%s" // Parameter names defined in azure file CSI driver, refer to // https://github.com/kubernetes-sigs/azurefile-csi-driver/blob/master/docs/driver-parameters.md azureFileShareName = "shareName" + + secretNameTemplate = "azure-storage-account-%s-secret" + defaultSecretNamespace = "default" ) var _ InTreePlugin = &azureFileCSITranslator{} +var secretNameFormatRE = regexp.MustCompile(`azure-storage-account-(.+)-secret`) + // azureFileCSITranslator handles translation of PV spec from In-tree // Azure File to CSI Azure File and vice versa type azureFileCSITranslator struct{} @@ -58,32 +65,41 @@ func (t *azureFileCSITranslator) TranslateInTreeStorageClassToCSI(sc *storage.St // and converts the AzureFile source to a CSIPersistentVolumeSource func (t *azureFileCSITranslator) TranslateInTreeInlineVolumeToCSI(volume *v1.Volume) (*v1.PersistentVolume, error) { if volume == nil || volume.AzureFile == nil { - return nil, fmt.Errorf("volume is nil or AWS EBS not defined on volume") + return nil, 
fmt.Errorf("volume is nil or Azure File not defined on volume") } azureSource := volume.AzureFile + accountName, err := getStorageAccountName(azureSource.SecretName) + if err != nil { + klog.Warningf("getStorageAccountName(%s) returned with error: %v", azureSource.SecretName, err) + accountName = azureSource.SecretName + } - pv := &v1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - // Must be unique per disk as it is used as the unique part of the - // staging path - Name: fmt.Sprintf("%s-%s", AzureFileDriverName, azureSource.ShareName), - }, - Spec: v1.PersistentVolumeSpec{ - PersistentVolumeSource: v1.PersistentVolumeSource{ - CSI: &v1.CSIPersistentVolumeSource{ - VolumeHandle: fmt.Sprintf(volumeIDTemplate, "", azureSource.SecretName, azureSource.ShareName), - ReadOnly: azureSource.ReadOnly, - VolumeAttributes: map[string]string{azureFileShareName: azureSource.ShareName}, - NodePublishSecretRef: &v1.SecretReference{ - Name: azureSource.ShareName, - Namespace: "default", + var ( + pv = &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + // Must be unique per disk as it is used as the unique part of the + // staging path + Name: fmt.Sprintf("%s-%s", AzureFileDriverName, azureSource.ShareName), + }, + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{ + CSI: &v1.CSIPersistentVolumeSource{ + Driver: AzureFileDriverName, + VolumeHandle: fmt.Sprintf(volumeIDTemplate, "", accountName, azureSource.ShareName, ""), + ReadOnly: azureSource.ReadOnly, + VolumeAttributes: map[string]string{azureFileShareName: azureSource.ShareName}, + NodeStageSecretRef: &v1.SecretReference{ + Name: azureSource.SecretName, + Namespace: defaultSecretNamespace, + }, }, }, + AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteMany}, }, - AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteMany}, - }, - } + } + ) + return pv, nil } @@ -95,23 +111,33 @@ func (t *azureFileCSITranslator) TranslateInTreePVToCSI(pv *v1.PersistentVolume) } azureSource := pv.Spec.PersistentVolumeSource.AzureFile - - volumeID := fmt.Sprintf(volumeIDTemplate, "", azureSource.SecretName, azureSource.ShareName) - // refer to https://github.com/kubernetes-sigs/azurefile-csi-driver/blob/master/docs/driver-parameters.md - csiSource := &v1.CSIPersistentVolumeSource{ - VolumeHandle: volumeID, - ReadOnly: azureSource.ReadOnly, - VolumeAttributes: map[string]string{azureFileShareName: azureSource.ShareName}, + accountName, err := getStorageAccountName(azureSource.SecretName) + if err != nil { + klog.Warningf("getStorageAccountName(%s) returned with error: %v", azureSource.SecretName, err) + accountName = azureSource.SecretName } + volumeID := fmt.Sprintf(volumeIDTemplate, "", accountName, azureSource.ShareName, "") - csiSource.NodePublishSecretRef = &v1.SecretReference{ - Name: azureSource.ShareName, - Namespace: *azureSource.SecretNamespace, + var ( + // refer to https://github.com/kubernetes-sigs/azurefile-csi-driver/blob/master/docs/driver-parameters.md + csiSource = &v1.CSIPersistentVolumeSource{ + Driver: AzureFileDriverName, + NodeStageSecretRef: &v1.SecretReference{ + Name: azureSource.SecretName, + Namespace: defaultSecretNamespace, + }, + ReadOnly: azureSource.ReadOnly, + VolumeAttributes: map[string]string{azureFileShareName: azureSource.ShareName}, + VolumeHandle: volumeID, + } + ) + + if azureSource.SecretNamespace != nil { + csiSource.NodeStageSecretRef.Namespace = *azureSource.SecretNamespace } pv.Spec.PersistentVolumeSource.AzureFile = nil pv.Spec.PersistentVolumeSource.CSI = csiSource - 
pv.Spec.AccessModes = backwardCompatibleAccessModes(pv.Spec.AccessModes) return pv, nil } @@ -129,22 +155,21 @@ func (t *azureFileCSITranslator) TranslateCSIPVToInTree(pv *v1.PersistentVolume) ReadOnly: csiSource.ReadOnly, } - if csiSource.NodePublishSecretRef != nil && csiSource.NodePublishSecretRef.Name != "" { - azureSource.SecretName = csiSource.NodePublishSecretRef.Name - azureSource.SecretNamespace = &csiSource.NodePublishSecretRef.Namespace + if csiSource.NodeStageSecretRef != nil && csiSource.NodeStageSecretRef.Name != "" { + azureSource.SecretName = csiSource.NodeStageSecretRef.Name + azureSource.SecretNamespace = &csiSource.NodeStageSecretRef.Namespace if csiSource.VolumeAttributes != nil { if shareName, ok := csiSource.VolumeAttributes[azureFileShareName]; ok { azureSource.ShareName = shareName } } } else { - _, _, fileShareName, err := getFileShareInfo(csiSource.VolumeHandle) + _, storageAccount, fileShareName, _, err := getFileShareInfo(csiSource.VolumeHandle) if err != nil { return nil, err } azureSource.ShareName = fileShareName - // to-do: for dynamic provision scenario in CSI, it uses cluster's identity to get storage account key - // secret for the file share is not created, we may create a serect here + azureSource.SecretName = fmt.Sprintf(secretNameTemplate, storageAccount) } pv.Spec.CSI = nil @@ -182,12 +207,25 @@ func (t *azureFileCSITranslator) RepairVolumeHandle(volumeHandle, nodeID string) } // get file share info according to volume id, e.g. -// input: "rg#f5713de20cde511e8ba4900#pvc-file-dynamic-17e43f84-f474-11e8-acd0-000d3a00df41" -// output: rg, f5713de20cde511e8ba4900, pvc-file-dynamic-17e43f84-f474-11e8-acd0-000d3a00df41 -func getFileShareInfo(id string) (string, string, string, error) { +// input: "rg#f5713de20cde511e8ba4900#pvc-file-dynamic-17e43f84-f474-11e8-acd0-000d3a00df41#diskname.vhd" +// output: rg, f5713de20cde511e8ba4900, pvc-file-dynamic-17e43f84-f474-11e8-acd0-000d3a00df41, diskname.vhd +func getFileShareInfo(id string) (string, string, string, string, error) { segments := strings.Split(id, separator) if len(segments) < 3 { - return "", "", "", fmt.Errorf("error parsing volume id: %q, should at least contain two #", id) + return "", "", "", "", fmt.Errorf("error parsing volume id: %q, should at least contain two #", id) } - return segments[0], segments[1], segments[2], nil + var diskName string + if len(segments) > 3 { + diskName = segments[3] + } + return segments[0], segments[1], segments[2], diskName, nil +} + +// get storage account name from secret name +func getStorageAccountName(secretName string) (string, error) { + matches := secretNameFormatRE.FindStringSubmatch(secretName) + if len(matches) != 2 { + return "", fmt.Errorf("could not get account name from %s, correct format: %s", secretName, secretNameFormatRE) + } + return matches[1], nil } diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go index 6a9f68c0db..e66342a7f1 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go @@ -210,7 +210,7 @@ func (item *primitiveItem) VisitPrimitive(schema *proto.Primitive) { } case proto.Number: switch item.Kind { - case proto.Number: + case proto.Integer, proto.Number: return } case proto.String: diff --git a/vendor/k8s.io/kubernetes/pkg/controller/cloud/node_controller.go b/vendor/k8s.io/kubernetes/pkg/controller/cloud/node_controller.go index d8353e3c94..dda5c76299 100644 
--- a/vendor/k8s.io/kubernetes/pkg/controller/cloud/node_controller.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/cloud/node_controller.go @@ -240,7 +240,7 @@ func (cnc *CloudNodeController) updateNodeAddress(ctx context.Context, node *v1. return } - nodeAddresses, err := getNodeAddressesByProviderIDOrName(ctx, instances, node) + nodeAddresses, err := getNodeAddressesByProviderIDOrName(ctx, instances, node.Spec.ProviderID, node.Name) if err != nil { klog.Errorf("Error getting node addresses for node %q: %v", node.Name, err) return @@ -408,10 +408,14 @@ func (cnc *CloudNodeController) initializeNode(ctx context.Context, node *v1.Nod // All of the returned functions are idempotent, because they are used in a retry-if-conflict // loop, meaning they could get called multiple times. func (cnc *CloudNodeController) getNodeModifiersFromCloudProvider(ctx context.Context, node *v1.Node, instances cloudprovider.Instances) ([]nodeModifier, error) { - var nodeModifiers []nodeModifier + var ( + nodeModifiers []nodeModifier + providerID string + err error + ) if node.Spec.ProviderID == "" { - providerID, err := cloudprovider.GetInstanceProviderID(ctx, cnc.cloud, types.NodeName(node.Name)) + providerID, err = cloudprovider.GetInstanceProviderID(ctx, cnc.cloud, types.NodeName(node.Name)) if err == nil { nodeModifiers = append(nodeModifiers, func(n *v1.Node) { if n.Spec.ProviderID == "" { @@ -429,9 +433,11 @@ func (cnc *CloudNodeController) getNodeModifiersFromCloudProvider(ctx context.Co // do not, the taint will be removed, and this will not be retried return nil, err } + } else { + providerID = node.Spec.ProviderID } - nodeAddresses, err := getNodeAddressesByProviderIDOrName(ctx, instances, node) + nodeAddresses, err := getNodeAddressesByProviderIDOrName(ctx, instances, providerID, node.Name) if err != nil { return nil, err } @@ -444,7 +450,7 @@ func (cnc *CloudNodeController) getNodeModifiersFromCloudProvider(ctx context.Co } } - if instanceType, err := getInstanceTypeByProviderIDOrName(ctx, instances, node); err != nil { + if instanceType, err := getInstanceTypeByProviderIDOrName(ctx, instances, providerID, node.Name); err != nil { return nil, err } else if instanceType != "" { klog.V(2).Infof("Adding node label from cloud provider: %s=%s", v1.LabelInstanceType, instanceType) @@ -459,7 +465,7 @@ func (cnc *CloudNodeController) getNodeModifiersFromCloudProvider(ctx context.Co } if zones, ok := cnc.cloud.Zones(); ok { - zone, err := getZoneByProviderIDOrName(ctx, zones, node) + zone, err := getZoneByProviderIDOrName(ctx, zones, providerID, node.Name) if err != nil { return nil, fmt.Errorf("failed to get zone from cloud provider: %v", err) } @@ -532,11 +538,11 @@ func ensureNodeExistsByProviderID(ctx context.Context, instances cloudprovider.I return instances.InstanceExistsByProviderID(ctx, providerID) } -func getNodeAddressesByProviderIDOrName(ctx context.Context, instances cloudprovider.Instances, node *v1.Node) ([]v1.NodeAddress, error) { - nodeAddresses, err := instances.NodeAddressesByProviderID(ctx, node.Spec.ProviderID) +func getNodeAddressesByProviderIDOrName(ctx context.Context, instances cloudprovider.Instances, providerID, nodeName string) ([]v1.NodeAddress, error) { + nodeAddresses, err := instances.NodeAddressesByProviderID(ctx, providerID) if err != nil { providerIDErr := err - nodeAddresses, err = instances.NodeAddresses(ctx, types.NodeName(node.Name)) + nodeAddresses, err = instances.NodeAddresses(ctx, types.NodeName(nodeName)) if err != nil { return nil, fmt.Errorf("error fetching 
node by provider ID: %v, and error by node name: %v", providerIDErr, err) } @@ -577,11 +583,13 @@ func ensureNodeProvidedIPExists(node *v1.Node, nodeAddresses []v1.NodeAddress) ( return nodeIP, nodeIPExists } -func getInstanceTypeByProviderIDOrName(ctx context.Context, instances cloudprovider.Instances, node *v1.Node) (string, error) { - instanceType, err := instances.InstanceTypeByProviderID(ctx, node.Spec.ProviderID) +// getInstanceTypeByProviderIDOrName will attempt to get the instance type of node using its providerID +// then it's name. If both attempts fail, an error is returned. +func getInstanceTypeByProviderIDOrName(ctx context.Context, instances cloudprovider.Instances, providerID, nodeName string) (string, error) { + instanceType, err := instances.InstanceTypeByProviderID(ctx, providerID) if err != nil { providerIDErr := err - instanceType, err = instances.InstanceType(ctx, types.NodeName(node.Name)) + instanceType, err = instances.InstanceType(ctx, types.NodeName(nodeName)) if err != nil { return "", fmt.Errorf("InstanceType: Error fetching by providerID: %v Error fetching by NodeName: %v", providerIDErr, err) } @@ -590,12 +598,12 @@ func getInstanceTypeByProviderIDOrName(ctx context.Context, instances cloudprovi } // getZoneByProviderIDorName will attempt to get the zone of node using its providerID -// then it's name. If both attempts fail, an error is returned -func getZoneByProviderIDOrName(ctx context.Context, zones cloudprovider.Zones, node *v1.Node) (cloudprovider.Zone, error) { - zone, err := zones.GetZoneByProviderID(ctx, node.Spec.ProviderID) +// then it's name. If both attempts fail, an error is returned. +func getZoneByProviderIDOrName(ctx context.Context, zones cloudprovider.Zones, providerID, nodeName string) (cloudprovider.Zone, error) { + zone, err := zones.GetZoneByProviderID(ctx, providerID) if err != nil { providerIDErr := err - zone, err = zones.GetZoneByNodeName(ctx, types.NodeName(node.Name)) + zone, err = zones.GetZoneByNodeName(ctx, types.NodeName(nodeName)) if err != nil { return cloudprovider.Zone{}, fmt.Errorf("Zone: Error fetching by providerID: %v Error fetching by NodeName: %v", providerIDErr, err) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache/actual_state_of_world.go b/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache/actual_state_of_world.go index 904e8015a4..f95b815e89 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache/actual_state_of_world.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache/actual_state_of_world.go @@ -79,6 +79,8 @@ var _ ActualStateOfWorld = &actualStateOfWorld{} type PluginInfo struct { SocketPath string Timestamp time.Time + Handler PluginHandler + Name string } func (asw *actualStateOfWorld) AddPlugin(pluginInfo PluginInfo) error { diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor/operation_executor.go b/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor/operation_executor.go index 12ae38ee40..b6e6019a46 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor/operation_executor.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor/operation_executor.go @@ -49,7 +49,7 @@ type OperationExecutor interface { // UnregisterPlugin deregisters the given plugin using a handler in the given plugin handler map. // It then updates the actual state of the world to reflect that. 
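To illustrate the signature change below, a short sketch of the reworked call site; the socket path and plugin name are hypothetical, and the types are the exported ones from the pluginmanager packages:

package main

import (
	"time"

	"k8s.io/klog"
	"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
	"k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor"
)

func unregisterExample(oe operationexecutor.OperationExecutor, asw operationexecutor.ActualStateOfWorldUpdater, h cache.PluginHandler) {
	info := cache.PluginInfo{
		SocketPath: "/var/lib/kubelet/plugins_registry/example.sock", // hypothetical socket
		Timestamp:  time.Now(),
		Handler:    h,                    // captured at registration time
		Name:       "example.csi.driver", // hypothetical, from GetInfo at registration
	}
	// No gRPC dial is needed anymore: the cached handler and name are enough.
	if err := oe.UnregisterPlugin(info, asw); err != nil {
		klog.Errorf("UnregisterPlugin failed for %s: %v", info.SocketPath, err)
	}
}
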
- UnregisterPlugin(socketPath string, pluginHandlers map[string]cache.PluginHandler, actualStateOfWorld ActualStateOfWorldUpdater) error + UnregisterPlugin(pluginInfo cache.PluginInfo, actualStateOfWorld ActualStateOfWorldUpdater) error } // NewOperationExecutor returns a new instance of OperationExecutor. @@ -105,12 +105,11 @@ func (oe *operationExecutor) RegisterPlugin( } func (oe *operationExecutor) UnregisterPlugin( - socketPath string, - pluginHandlers map[string]cache.PluginHandler, + pluginInfo cache.PluginInfo, actualStateOfWorld ActualStateOfWorldUpdater) error { generatedOperation := - oe.operationGenerator.GenerateUnregisterPluginFunc(socketPath, pluginHandlers, actualStateOfWorld) + oe.operationGenerator.GenerateUnregisterPluginFunc(pluginInfo, actualStateOfWorld) return oe.pendingOperations.Run( - socketPath, generatedOperation) + pluginInfo.SocketPath, generatedOperation) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor/operation_generator.go b/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor/operation_generator.go index a16546601d..c1eb242f45 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor/operation_generator.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/operationexecutor/operation_generator.go @@ -67,8 +67,7 @@ type OperationGenerator interface { // Generates the UnregisterPlugin function needed to perform the unregistration of a plugin GenerateUnregisterPluginFunc( - socketPath string, - pluginHandlers map[string]cache.PluginHandler, + pluginInfo cache.PluginInfo, actualStateOfWorldUpdater ActualStateOfWorldUpdater) func() error } @@ -115,6 +114,8 @@ func (og *operationGenerator) GenerateRegisterPluginFunc( err = actualStateOfWorldUpdater.AddPlugin(cache.PluginInfo{ SocketPath: socketPath, Timestamp: timestamp, + Handler: handler, + Name: infoResp.Name, }) if err != nil { klog.Errorf("RegisterPlugin error -- failed to add plugin at socket %s, err: %v", socketPath, err) @@ -133,35 +134,20 @@ func (og *operationGenerator) GenerateRegisterPluginFunc( } func (og *operationGenerator) GenerateUnregisterPluginFunc( - socketPath string, - pluginHandlers map[string]cache.PluginHandler, + pluginInfo cache.PluginInfo, actualStateOfWorldUpdater ActualStateOfWorldUpdater) func() error { unregisterPluginFunc := func() error { - client, conn, err := dial(socketPath, dialTimeoutDuration) - if err != nil { - return fmt.Errorf("UnregisterPlugin error -- dial failed at socket %s, err: %v", socketPath, err) + if pluginInfo.Handler == nil { + return fmt.Errorf("UnregisterPlugin error -- failed to get plugin handler for %s", pluginInfo.SocketPath) } - defer conn.Close() - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - infoResp, err := client.GetInfo(ctx, ®isterapi.InfoRequest{}) - if err != nil { - return fmt.Errorf("UnregisterPlugin error -- failed to get plugin info using RPC GetInfo at socket %s, err: %v", socketPath, err) - } - - handler, ok := pluginHandlers[infoResp.Type] - if !ok { - return fmt.Errorf("UnregisterPlugin error -- no handler registered for plugin type: %s at socket %s", infoResp.Type, socketPath) - } - // We remove the plugin to the actual state of world cache before calling a plugin consumer's Unregister handle // so that if we receive a register event during Register Plugin, we can process it as a Register call. 
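For reference, a minimal PluginHandler stub (all names here are illustrative) showing the DeRegisterPlugin hook that the rewritten unregister function now invokes directly from the cached PluginInfo, without the GetInfo round-trip removed above:

package main

import (
	"k8s.io/klog"
	"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
)

// exampleHandler is a hypothetical no-op implementation of cache.PluginHandler.
type exampleHandler struct{}

var _ cache.PluginHandler = exampleHandler{}

func (exampleHandler) ValidatePlugin(pluginName string, endpoint string, versions []string) error {
	return nil // accept everything in this sketch
}

func (exampleHandler) RegisterPlugin(pluginName, endpoint string, versions []string) error {
	klog.V(4).Infof("registered %s at %s", pluginName, endpoint)
	return nil
}

func (exampleHandler) DeRegisterPlugin(pluginName string) {
	// Invoked with pluginInfo.Name cached at registration; no GetInfo RPC required.
	klog.V(4).Infof("deregistered %s", pluginName)
}

func main() {
	var h cache.PluginHandler = exampleHandler{}
	h.DeRegisterPlugin("example.csi.driver") // hypothetical plugin name
}
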
- actualStateOfWorldUpdater.RemovePlugin(socketPath) + actualStateOfWorldUpdater.RemovePlugin(pluginInfo.SocketPath) - handler.DeRegisterPlugin(infoResp.Name) + pluginInfo.Handler.DeRegisterPlugin(pluginInfo.Name) + + klog.V(4).Infof("DeRegisterPlugin called for %s on %v", pluginInfo.Name, pluginInfo.Handler) return nil } return unregisterPluginFunc diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/reconciler/reconciler.go b/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/reconciler/reconciler.go index 7f6790d5c1..11d02116be 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/reconciler/reconciler.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/pluginmanager/reconciler/reconciler.go @@ -127,7 +127,7 @@ func (rc *reconciler) reconcile() { if unregisterPlugin { klog.V(5).Infof(registeredPlugin.GenerateMsgDetailed("Starting operationExecutor.UnregisterPlugin", "")) - err := rc.operationExecutor.UnregisterPlugin(registeredPlugin.SocketPath, rc.getHandlers(), rc.actualStateOfWorld) + err := rc.operationExecutor.UnregisterPlugin(registeredPlugin, rc.actualStateOfWorld) if err != nil && !goroutinemap.IsAlreadyExists(err) && !exponentialbackoff.IsExponentialBackoff(err) { diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/eventhandlers.go b/vendor/k8s.io/kubernetes/pkg/scheduler/eventhandlers.go index f6d8bb1c96..0b4f6d9901 100644 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/eventhandlers.go +++ b/vendor/k8s.io/kubernetes/pkg/scheduler/eventhandlers.go @@ -335,6 +335,9 @@ func (sched *Scheduler) skipPodUpdate(pod *v1.Pod) bool { // Annotations must be excluded for the reasons described in // https://github.com/kubernetes/kubernetes/issues/52914. p.Annotations = nil + // Same as above, when annotations are modified with ServerSideApply, + // ManagedFields may also change and must be excluded + p.ManagedFields = nil return p } assumedPodCopy, podCopy := f(assumedPod), f(pod) diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/resource_allocation.go b/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/resource_allocation.go index f091ddfc63..11cefa648b 100644 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/resource_allocation.go +++ b/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources/resource_allocation.go @@ -117,6 +117,7 @@ func calculateResourceAllocatableRequest(nodeInfo *schedulernodeinfo.NodeInfo, p // calculatePodResourceRequest returns the total non-zero requests. If Overhead is defined for the pod and the // PodOverhead feature is enabled, the Overhead is added to the result. 
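The formula in the comment added below is easier to follow with numbers; a standalone sketch with hypothetical requests (init containers run sequentially, so the pod needs the larger of their per-container maximum and the sum of the long-running containers, plus overhead):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	containerReqs := []string{"200m", "300m"} // hypothetical long-running containers
	initReqs := []string{"600m"}              // hypothetical init container
	overhead := resource.MustParse("100m")    // hypothetical pod overhead

	var sum, initMax resource.Quantity
	for _, r := range containerReqs {
		q := resource.MustParse(r)
		sum.Add(q) // sum of regular container requests
	}
	for _, r := range initReqs {
		if q := resource.MustParse(r); initMax.Cmp(q) < 0 {
			initMax = q // max of individual init container requests
		}
	}
	req := sum.DeepCopy()
	if req.Cmp(initMax) < 0 {
		req = initMax.DeepCopy()
	}
	req.Add(overhead)
	fmt.Println(req.String()) // max(500m, 600m) + 100m = 700m
}
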
+// podResourceRequest = max(sum(podSpec.Containers), podSpec.InitContainers) + overHead func calculatePodResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 { var podRequest int64 for i := range pod.Spec.Containers { @@ -125,11 +126,20 @@ func calculatePodResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 { podRequest += value } + for i := range pod.Spec.InitContainers { + initContainer := &pod.Spec.InitContainers[i] + value := schedutil.GetNonzeroRequestForResource(resource, &initContainer.Resources.Requests) + if podRequest < value { + podRequest = value + } + } + // If Overhead is being utilized, add to the total requests for the pod if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) { if quantity, found := pod.Spec.Overhead[resource]; found { podRequest += quantity.Value() } } + return podRequest } diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/nodeinfo/node_info.go b/vendor/k8s.io/kubernetes/pkg/scheduler/nodeinfo/node_info.go index b8d14adf85..085b65188c 100644 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/nodeinfo/node_info.go +++ b/vendor/k8s.io/kubernetes/pkg/scheduler/nodeinfo/node_info.go @@ -173,7 +173,10 @@ func (r *Resource) Add(rl v1.ResourceList) { case v1.ResourcePods: r.AllowedPodNumber += int(rQuant.Value()) case v1.ResourceEphemeralStorage: - r.EphemeralStorage += rQuant.Value() + if utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) { + // if the local storage capacity isolation feature gate is disabled, pods request 0 disk. + r.EphemeralStorage += rQuant.Value() + } default: if v1helper.IsScalarResourceName(rName) { r.AddScalar(rName, rQuant.Value()) @@ -565,21 +568,32 @@ func (n *NodeInfo) resetSlicesIfEmpty() { } } +// resourceRequest = max(sum(podSpec.Containers), podSpec.InitContainers) + overHead func calculateResource(pod *v1.Pod) (res Resource, non0CPU int64, non0Mem int64) { resPtr := &res for _, c := range pod.Spec.Containers { resPtr.Add(c.Resources.Requests) - non0CPUReq, non0MemReq := schedutil.GetNonzeroRequests(&c.Resources.Requests) non0CPU += non0CPUReq non0Mem += non0MemReq // No non-zero resources for GPUs or opaque resources. } + for _, ic := range pod.Spec.InitContainers { + resPtr.SetMaxResource(ic.Resources.Requests) + non0CPUReq, non0MemReq := schedutil.GetNonzeroRequests(&ic.Resources.Requests) + if non0CPU < non0CPUReq { + non0CPU = non0CPUReq + } + + if non0Mem < non0MemReq { + non0Mem = non0MemReq + } + } + // If Overhead is being utilized, add to the total requests for the pod if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) { resPtr.Add(pod.Spec.Overhead) - if _, found := pod.Spec.Overhead[v1.ResourceCPU]; found { non0CPU += pod.Spec.Overhead.Cpu().MilliValue() } diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/scheduler.go b/vendor/k8s.io/kubernetes/pkg/scheduler/scheduler.go index 0cbc50d684..e0bc2759bc 100644 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/scheduler.go +++ b/vendor/k8s.io/kubernetes/pkg/scheduler/scheduler.go @@ -592,6 +592,9 @@ func (sched *Scheduler) scheduleOne(ctx context.Context) { // succeeds, the pod should get counted as a success the next time we try to // schedule it. (hopefully) metrics.PodScheduleFailures.Inc() + } else if err == core.ErrNoNodesAvailable { + // No nodes available is counted as unschedulable rather than an error. 
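// Illustrative aside, not part of the patch: how scheduleOne now buckets
// outcomes. core.ErrNoNodesAvailable describes the pod's circumstance (no
// nodes exist yet, hence unschedulable) rather than a scheduler malfunction,
// so it increments PodScheduleFailures instead of PodScheduleErrors. The
// sentinel value and classify helper below are assumptions for the sketch.
package sketch

import "errors"

var errNoNodesAvailable = errors.New("no nodes available to schedule pods")

type outcome int

const (
	scheduleFailure outcome = iota // counted in PodScheduleFailures
	scheduleError                  // counted in PodScheduleErrors
)

func classify(err error) outcome {
	if errors.Is(err, errNoNodesAvailable) {
		return scheduleFailure
	}
	return scheduleError
}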
+ metrics.PodScheduleFailures.Inc() } else { klog.Errorf("error selecting node for pod: %v", err) metrics.PodScheduleErrors.Inc() diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/util/BUILD b/vendor/k8s.io/kubernetes/pkg/scheduler/util/BUILD index 857710883e..93d34c34c8 100644 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/util/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/scheduler/util/BUILD @@ -40,10 +40,12 @@ go_library( deps = [ "//pkg/api/v1/pod:go_default_library", "//pkg/apis/core/v1/helper:go_default_library", + "//pkg/features:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/kube-scheduler/extender/v1:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/util/non_zero.go b/vendor/k8s.io/kubernetes/pkg/scheduler/util/non_zero.go index e29eced615..98be63524f 100644 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/util/non_zero.go +++ b/vendor/k8s.io/kubernetes/pkg/scheduler/util/non_zero.go @@ -18,7 +18,9 @@ package util import ( v1 "k8s.io/api/core/v1" + utilfeature "k8s.io/apiserver/pkg/util/feature" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" + "k8s.io/kubernetes/pkg/features" ) // For each of these resources, a pod that doesn't request the resource explicitly @@ -60,6 +62,11 @@ func GetNonzeroRequestForResource(resource v1.ResourceName, requests *v1.Resourc } return requests.Memory().Value() case v1.ResourceEphemeralStorage: + // if the local storage capacity isolation feature gate is disabled, pods request 0 disk. 
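// Illustrative aside, not part of the patch: the gate check that follows,
// reduced to a stand-alone helper. With LocalStorageCapacityIsolation off,
// ephemeral-storage requests are treated as zero so they never influence
// scoring; gateEnabled is an assumed parameter replacing the global
// feature-gate lookup.
package sketch

func nonzeroEphemeralStorageRequest(requested int64, gateEnabled bool) int64 {
	if !gateEnabled {
		// gate disabled: pods request 0 disk regardless of their spec
		return 0
	}
	return requested
}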
+ if !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) { + return 0 + } + quantity, found := (*requests)[v1.ResourceEphemeralStorage] if !found { return 0 diff --git a/vendor/k8s.io/kubernetes/pkg/util/ipvs/BUILD b/vendor/k8s.io/kubernetes/pkg/util/ipvs/BUILD index c4ac2cf924..25f7f4f790 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/ipvs/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/ipvs/BUILD @@ -17,10 +17,10 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:android": [ - "//vendor/github.com/docker/libnetwork/ipvs:go_default_library", + "//third_party/forked/ipvs:go_default_library", ], "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/github.com/docker/libnetwork/ipvs:go_default_library", + "//third_party/forked/ipvs:go_default_library", ], "//conditions:default": [], }), @@ -39,7 +39,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:android": [ - "//vendor/github.com/docker/libnetwork/ipvs:go_default_library", + "//third_party/forked/ipvs:go_default_library", "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], @@ -56,7 +56,7 @@ go_library( "//vendor/k8s.io/utils/exec:go_default_library", ], "@io_bazel_rules_go//go/platform:linux": [ - "//vendor/github.com/docker/libnetwork/ipvs:go_default_library", + "//third_party/forked/ipvs:go_default_library", "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", ], diff --git a/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs_linux.go b/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs_linux.go index 9acfc570a4..846bdb3621 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/util/ipvs/ipvs_linux.go @@ -27,8 +27,8 @@ import ( "syscall" "time" - libipvs "github.com/docker/libnetwork/ipvs" "k8s.io/klog" + libipvs "k8s.io/kubernetes/third_party/forked/ipvs" utilexec "k8s.io/utils/exec" ) diff --git a/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin.go b/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin.go index 21b16f07b5..0a0fe205ec 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin.go @@ -17,6 +17,7 @@ limitations under the License. package csi import ( + "context" "errors" "fmt" "os" @@ -24,8 +25,6 @@ import ( "strings" "time" - "context" - "k8s.io/klog" api "k8s.io/api/core/v1" @@ -227,7 +226,7 @@ func (p *csiPlugin) Init(host volume.VolumeHost) error { if utilfeature.DefaultFeatureGate.Enabled(features.CSINodeInfo) && utilfeature.DefaultFeatureGate.Enabled(features.CSIMigration) { - // This function prevents Kubelet from posting Ready status until CSINodeInfo + // This function prevents Kubelet from posting Ready status until CSINode // is both installed and initialized if err := initializeCSINode(host, localNim); err != nil { return errors.New(log("failed to initialize CSINodeInfo: %v", err)) @@ -244,21 +243,28 @@ func (p *csiPlugin) Init(host volume.VolumeHost) error { func initializeCSINode(host volume.VolumeHost, nim nodeinfomanager.Interface) error { kvh, ok := host.(volume.KubeletVolumeHost) if !ok { - klog.V(4).Info("Cast from VolumeHost to KubeletVolumeHost failed. Skipping CSINodeInfo initialization, not running on kubelet") + klog.V(4).Info("Cast from VolumeHost to KubeletVolumeHost failed. 
Skipping CSINode initialization, not running on kubelet") return nil } kubeClient := host.GetKubeClient() if kubeClient == nil { - // Kubelet running in standalone mode. Skip CSINodeInfo initialization - klog.Warning("Skipping CSINodeInfo initialization, kubelet running in standalone mode") + // Kubelet running in standalone mode. Skip CSINode initialization + klog.Warning("Skipping CSINode initialization, kubelet running in standalone mode") return nil } - kvh.SetKubeletError(errors.New("CSINodeInfo is not yet initialized")) + kvh.SetKubeletError(errors.New("CSINode is not yet initialized")) go func() { defer utilruntime.HandleCrash() + // First wait indefinitely to talk to Kube APIServer + nodeName := host.GetNodeName() + err := waitForAPIServerForever(kubeClient, nodeName) + if err != nil { + klog.Fatalf("Failed to initialize CSINode while waiting for API server to report ok: %v", err) + } + // Backoff parameters tuned to retry over 140 seconds. Will fail and restart the Kubelet // after max retry steps. initBackoff := wait.Backoff{ @@ -267,12 +273,12 @@ func initializeCSINode(host volume.VolumeHost, nim nodeinfomanager.Interface) er Factor: 6.0, Jitter: 0.1, } - err := wait.ExponentialBackoff(initBackoff, func() (bool, error) { - klog.V(4).Infof("Initializing migrated drivers on CSINodeInfo") + err = wait.ExponentialBackoff(initBackoff, func() (bool, error) { + klog.V(4).Infof("Initializing migrated drivers on CSINode") err := nim.InitializeCSINodeWithAnnotation() if err != nil { - kvh.SetKubeletError(fmt.Errorf("Failed to initialize CSINodeInfo: %v", err)) - klog.Errorf("Failed to initialize CSINodeInfo: %v", err) + kvh.SetKubeletError(fmt.Errorf("Failed to initialize CSINode: %v", err)) + klog.Errorf("Failed to initialize CSINode: %v", err) return false, nil } @@ -286,7 +292,7 @@ func initializeCSINode(host volume.VolumeHost, nim nodeinfomanager.Interface) er // using CSI for all Migrated volume plugins. Then all the CSINode initialization // code can be dropped from Kubelet. // Kill the Kubelet process and allow it to restart to retry initialization - klog.Fatalf("Failed to initialize CSINodeInfo after retrying") + klog.Fatalf("Failed to initialize CSINode after retrying: %v", err) } }() return nil @@ -918,3 +924,28 @@ func highestSupportedVersion(versions []string) (*utilversion.Version, error) { } return highestSupportedVersion, nil } + +// waitForAPIServerForever waits forever to get a CSINode instance as a proxy +// for a healthy APIServer +func waitForAPIServerForever(client clientset.Interface, nodeName types.NodeName) error { + var lastErr error + err := wait.PollImmediateInfinite(time.Second, func() (bool, error) { + // Get a CSINode from API server to make sure 1) kubelet can reach API server + // and 2) it has enough permissions. Kubelet may have restricted permissions + // when it's bootstrapping TLS. 
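// Illustrative aside, not part of the patch: the control flow of
// waitForAPIServerForever, with a plain loop standing in for apimachinery's
// wait.PollImmediateInfinite. probe is an assumed stand-in for the CSINode
// GET; a NotFound response still counts as contact, since reachability and
// permissions are all this wait cares about.
package sketch

import "time"

func waitForAPIServer(interval time.Duration, probe func() (contacted bool)) {
	// Retry forever; the surrounding code only proceeds once the API server
	// has answered at least once.
	for !probe() {
		time.Sleep(interval)
	}
}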
+ // https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/ + _, lastErr = client.StorageV1().CSINodes().Get(context.TODO(), string(nodeName), meta.GetOptions{}) + if lastErr == nil || apierrors.IsNotFound(lastErr) { + // API server contacted + return true, nil + } + klog.V(2).Infof("Failed to contact API server when waiting for CSINode publishing: %s", lastErr) + return false, nil + }) + if err != nil { + // In theory this is unreachable, but just in case: + return fmt.Errorf("%v: %v", err, lastErr) + } + + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go b/vendor/k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go index 98a16be6e8..76b4cc0da1 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go @@ -397,16 +397,16 @@ func (nim *nodeInfoManager) InitializeCSINodeWithAnnotation() error { return goerrors.New("error getting CSI client") } - var updateErrs []error + var lastErr error err := wait.ExponentialBackoff(updateBackoff, func() (bool, error) { - if err := nim.tryInitializeCSINodeWithAnnotation(csiKubeClient); err != nil { - updateErrs = append(updateErrs, err) + if lastErr = nim.tryInitializeCSINodeWithAnnotation(csiKubeClient); lastErr != nil { + klog.V(2).Infof("Failed to publish CSINode: %v", lastErr) return false, nil } return true, nil }) if err != nil { - return fmt.Errorf("error updating CSINode annotation: %v; caused by: %v", err, utilerrors.NewAggregate(updateErrs)) + return fmt.Errorf("error updating CSINode annotation: %v; caused by: %v", err, lastErr) } return nil diff --git a/vendor/k8s.io/kubernetes/third_party/forked/ipvs/BUILD b/vendor/k8s.io/kubernetes/third_party/forked/ipvs/BUILD new file mode 100644 index 0000000000..e5c5765035 --- /dev/null +++ b/vendor/k8s.io/kubernetes/third_party/forked/ipvs/BUILD @@ -0,0 +1,49 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +licenses(["notice"]) + +go_library( + name = "go_default_library", + srcs = [ + "constants.go", + "ipvs.go", + "netlink.go", + ], + importpath = "k8s.io/kubernetes/third_party/forked/ipvs", + visibility = ["//visibility:public"], + deps = select({ + "@io_bazel_rules_go//go/platform:android": [ + "//vendor/github.com/sirupsen/logrus:go_default_library", + "//vendor/github.com/vishvananda/netlink/nl:go_default_library", + "//vendor/github.com/vishvananda/netns:go_default_library", + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "@io_bazel_rules_go//go/platform:linux": [ + "//vendor/github.com/sirupsen/logrus:go_default_library", + "//vendor/github.com/vishvananda/netlink/nl:go_default_library", + "//vendor/github.com/vishvananda/netns:go_default_library", + "//vendor/golang.org/x/sys/unix:go_default_library", + ], + "//conditions:default": [], + }), +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) + +go_test( + name = "go_default_test", + srcs = ["netlink_test.go"], + embed = [":go_default_library"], +) diff --git a/vendor/github.com/docker/libnetwork/LICENSE b/vendor/k8s.io/kubernetes/third_party/forked/ipvs/LICENSE similarity index 100% rename from vendor/github.com/docker/libnetwork/LICENSE rename to 
vendor/k8s.io/kubernetes/third_party/forked/ipvs/LICENSE diff --git a/vendor/github.com/docker/libnetwork/ipvs/constants.go b/vendor/k8s.io/kubernetes/third_party/forked/ipvs/constants.go similarity index 85% rename from vendor/github.com/docker/libnetwork/ipvs/constants.go rename to vendor/k8s.io/kubernetes/third_party/forked/ipvs/constants.go index b6b7f2bb5e..0b66ce9b29 100644 --- a/vendor/github.com/docker/libnetwork/ipvs/constants.go +++ b/vendor/k8s.io/kubernetes/third_party/forked/ipvs/constants.go @@ -1,5 +1,8 @@ // +build linux +// Code and documentation copyright 2015 Docker, Inc. +// Code released under the Apache 2.0 license. Docs released under Creative Commons. + package ipvs const ( @@ -144,6 +147,17 @@ const ( // a statically assigned hash table by their source IP // addresses. SourceHashing = "sh" + + // WeightedRoundRobin assigns jobs to real servers proportionally + // to their real servers' weight. Servers with higher weights + // receive new jobs first and get more jobs than servers + // with lower weights. Servers with equal weights get + // an equal distribution of new jobs. + WeightedRoundRobin = "wrr" + + // WeightedLeastConnection assigns more jobs to servers + // with fewer jobs, relative to the real servers' weight. + WeightedLeastConnection = "wlc" ) const ( diff --git a/vendor/github.com/docker/libnetwork/ipvs/ipvs.go b/vendor/k8s.io/kubernetes/third_party/forked/ipvs/ipvs.go similarity index 97% rename from vendor/github.com/docker/libnetwork/ipvs/ipvs.go rename to vendor/k8s.io/kubernetes/third_party/forked/ipvs/ipvs.go index 61b6f0a5e4..20088f8483 100644 --- a/vendor/github.com/docker/libnetwork/ipvs/ipvs.go +++ b/vendor/k8s.io/kubernetes/third_party/forked/ipvs/ipvs.go @@ -1,5 +1,8 @@ // +build linux +// Code and documentation copyright 2015 Docker, Inc. +// Code released under the Apache 2.0 license. Docs released under Creative Commons. + package ipvs import ( diff --git a/vendor/github.com/docker/libnetwork/ipvs/netlink.go b/vendor/k8s.io/kubernetes/third_party/forked/ipvs/netlink.go similarity index 89% rename from vendor/github.com/docker/libnetwork/ipvs/netlink.go rename to vendor/k8s.io/kubernetes/third_party/forked/ipvs/netlink.go index 083909ae05..63ff1f0a75 100644 --- a/vendor/github.com/docker/libnetwork/ipvs/netlink.go +++ b/vendor/k8s.io/kubernetes/third_party/forked/ipvs/netlink.go @@ -1,10 +1,14 @@ // +build linux +// Code and documentation copyright 2015 Docker, Inc. +// Code released under the Apache 2.0 license. Docs released under Creative Commons.
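// Illustrative aside, not part of the patch: the address-handling change in
// the netlink hunks below. Attribute order from the kernel is not guaranteed,
// so the raw address bytes are stashed and parseIP runs only once the address
// family is known (or, on pre-3.18 kernels, inferred from the bytes
// themselves). inferFamily is an assumed stand-alone rendering of that
// heuristic.
package sketch

import (
	"errors"
	"syscall"
)

// inferFamily mirrors the heuristic: 4 bytes is IPv4; a 16-byte buffer with a
// non-zero head and an all-zero tail is IPv4 as packed by netlink; anything
// else non-zero is IPv6.
func inferFamily(address []byte) (uint16, error) {
	allZero := func(b []byte) bool {
		for _, v := range b {
			if v != 0 {
				return false
			}
		}
		return true
	}
	if len(address) == 4 {
		return syscall.AF_INET, nil
	}
	if allZero(address) {
		return 0, errors.New("could not parse IP family from address data")
	}
	if !allZero(address[:4]) && allZero(address[4:]) {
		return syscall.AF_INET, nil
	}
	return syscall.AF_INET6, nil
}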
+ package ipvs import ( "bytes" "encoding/binary" + "errors" "fmt" "net" "os/exec" @@ -315,6 +319,7 @@ func assembleStats(msg []byte) (SvcStats, error) { func assembleService(attrs []syscall.NetlinkRouteAttr) (*Service, error) { var s Service + var addressBytes []byte for _, attr := range attrs { @@ -327,11 +332,7 @@ func assembleService(attrs []syscall.NetlinkRouteAttr) (*Service, error) { case ipvsSvcAttrProtocol: s.Protocol = native.Uint16(attr.Value) case ipvsSvcAttrAddress: - ip, err := parseIP(attr.Value, s.AddressFamily) - if err != nil { - return nil, err - } - s.Address = ip + addressBytes = attr.Value case ipvsSvcAttrPort: s.Port = binary.BigEndian.Uint16(attr.Value) case ipvsSvcAttrFWMark: @@ -353,6 +354,16 @@ func assembleService(attrs []syscall.NetlinkRouteAttr) (*Service, error) { } } + + // parse Address after parsing AddressFamily, to avoid a parseIP error + if addressBytes != nil { + ip, err := parseIP(addressBytes, s.AddressFamily) + if err != nil { + return nil, err + } + s.Address = ip + } + return &s, nil } @@ -416,18 +427,18 @@ func (i *Handle) doCmdWithoutAttr(cmd uint8) ([][]byte, error) { func assembleDestination(attrs []syscall.NetlinkRouteAttr) (*Destination, error) { var d Destination + var addressBytes []byte for _, attr := range attrs { attrType := int(attr.Attr.Type) switch attrType { + + case ipvsDestAttrAddressFamily: + d.AddressFamily = native.Uint16(attr.Value) case ipvsDestAttrAddress: - ip, err := parseIP(attr.Value, syscall.AF_INET) - if err != nil { - return nil, err - } - d.Address = ip + addressBytes = attr.Value case ipvsDestAttrPort: d.Port = binary.BigEndian.Uint16(attr.Value) case ipvsDestAttrForwardingMethod: @@ -438,8 +449,6 @@ func assembleDestination(attrs []syscall.NetlinkRouteAttr) (*Destination, error) d.UpperThreshold = native.Uint32(attr.Value) case ipvsDestAttrLowerThreshold: d.LowerThreshold = native.Uint32(attr.Value) - case ipvsDestAttrAddressFamily: - d.AddressFamily = native.Uint16(attr.Value) case ipvsDestAttrActiveConnections: d.ActiveConnections = int(native.Uint16(attr.Value)) case ipvsDestAttrInactiveConnections: @@ -452,9 +461,63 @@ func assembleDestination(attrs []syscall.NetlinkRouteAttr) (*Destination, error) d.Stats = DstStats(stats) } } + + // in older kernels (< 3.18), the destination address family attribute doesn't exist, so we must + // infer it based on the destination address provided. + if d.AddressFamily == 0 { + // we can't check the address family using the net stdlib because netlink returns + // IPv4 addresses as the first 4 bytes in a []byte of length 16, whereas the + // stdlib expects them as the last 4 bytes. + addressFamily, err := getIPFamily(addressBytes) + if err != nil { + return nil, err + } + d.AddressFamily = addressFamily + } + + // parse Address after parsing AddressFamily, to avoid a parseIP error + if addressBytes != nil { + ip, err := parseIP(addressBytes, d.AddressFamily) + if err != nil { + return nil, err + } + d.Address = ip + } + return &d, nil } +// getIPFamily parses the IP family based on raw data from netlink.
+// For AF_INET, netlink will set the first 4 bytes with trailing zeros +// 10.0.0.1 -> [10 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0] +// For AF_INET6, the full 16 byte array is used: +// 2001:db8:3c4d:15::1a00 -> [32 1 13 184 60 77 0 21 0 0 0 0 0 0 26 0] +func getIPFamily(address []byte) (uint16, error) { + if len(address) == 4 { + return syscall.AF_INET, nil + } + + if isZeros(address) { + return 0, errors.New("could not parse IP family from address data") + } + + // assume IPv4 if first 4 bytes are non-zero but rest of the data is trailing zeros + if !isZeros(address[:4]) && isZeros(address[4:]) { + return syscall.AF_INET, nil + } + + return syscall.AF_INET6, nil +} + +func isZeros(b []byte) bool { + for i := 0; i < len(b); i++ { + if b[i] != 0 { + return false + } + } + return true +} + // parseDestination given a ipvs netlink response this function will respond with a valid destination entry, an error otherwise func (i *Handle) parseDestination(msg []byte) (*Destination, error) { var dst *Destination diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go b/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go index ec61cf0ca2..e36bd3e718 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go +++ b/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go @@ -121,6 +121,11 @@ func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI stri diskEncryptionSetID := "" writeAcceleratorEnabled := false + vmset, err := c.getNodeVMSet(nodeName, azcache.CacheReadTypeUnsafe) + if err != nil { + return -1, err + } + if isManagedDisk { diskName := path.Base(diskURI) resourceGroup, err := getResourceGroupFromDiskURI(diskURI) @@ -140,9 +145,12 @@ func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI stri attachErr := fmt.Sprintf( "disk(%s) already attached to node(%s), could not be attached to node(%s)", diskURI, *disk.ManagedBy, nodeName) - attachedNode := path.Base(*disk.ManagedBy) + attachedNode, err := vmset.GetNodeNameByProviderID(*disk.ManagedBy) + if err != nil { + return -1, err + } klog.V(2).Infof("found dangling volume %s attached to node %s", diskURI, attachedNode) - danglingErr := volerr.NewDanglingError(attachErr, types.NodeName(attachedNode), "") + danglingErr := volerr.NewDanglingError(attachErr, attachedNode, "") return -1, danglingErr } @@ -157,11 +165,6 @@ func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI stri } } - vmset, err := c.getNodeVMSet(nodeName, azcache.CacheReadTypeUnsafe) - if err != nil { - return -1, err - } - instanceid, err := c.cloud.InstanceID(context.TODO(), nodeName) if err != nil { klog.Warningf("failed to get azure instance id (%v) for node %s", err, nodeName) diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go b/vendor/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go index 597e3e70ce..1ac1d94712 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go +++ b/vendor/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go @@ -407,7 +407,7 @@ func (az *Cloud) getServiceLoadBalancerStatus(service *v1.Service, lb *network.L if pipID == nil { return nil, fmt.Errorf("get(%s): lb(%s) - failed to get LB PublicIPAddress ID is Nil", serviceName, *lb.Name) } - pipName, err := getLastSegment(*pipID) + pipName, err := getLastSegment(*pipID, "/") if err != nil { return nil, fmt.Errorf("get(%s): lb(%s) - failed to get LB PublicIPAddress Name from ID(%s)", serviceName, *lb.Name, 
*pipID) } diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/azure_standard.go b/vendor/k8s.io/legacy-cloud-providers/azure/azure_standard.go index 3ceb74415c..5920ed22aa 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/azure_standard.go +++ b/vendor/k8s.io/legacy-cloud-providers/azure/azure_standard.go @@ -72,7 +72,7 @@ const ( ) var errNotInVMSet = errors.New("vm is not in the vmset") -var providerIDRE = regexp.MustCompile(`^` + CloudProviderName + `://(?:.*)/Microsoft.Compute/virtualMachines/(.+)$`) +var providerIDRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/Microsoft.Compute/virtualMachines/(.+)$`) var backendPoolIDRE = regexp.MustCompile(`^/subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Network/loadBalancers/(.+)/backendAddressPools/(?:.*)`) var nicResourceGroupRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/Microsoft.Network/networkInterfaces/(?:.*)`) @@ -171,8 +171,8 @@ func isMasterNode(node *v1.Node) bool { } // returns the deepest child's identifier from a full identifier string. -func getLastSegment(ID string) (string, error) { - parts := strings.Split(ID, "/") +func getLastSegment(ID, separator string) (string, error) { + parts := strings.Split(ID, separator) name := parts[len(parts)-1] if len(name) == 0 { return "", fmt.Errorf("resource name was missing from identifier") @@ -519,7 +519,7 @@ func (as *availabilitySet) GetIPByNodeName(name string) (string, string, error) publicIP := "" if ipConfig.PublicIPAddress != nil && ipConfig.PublicIPAddress.ID != nil { pipID := *ipConfig.PublicIPAddress.ID - pipName, err := getLastSegment(pipID) + pipName, err := getLastSegment(pipID, "/") if err != nil { return "", "", fmt.Errorf("failed to publicIP name for node %q with pipID %q", name, pipID) } @@ -589,7 +589,7 @@ func (as *availabilitySet) getAgentPoolAvailabiliySets(nodes []*v1.Node) (agentP // already added in the list continue } - asName, err := getLastSegment(asID) + asName, err := getLastSegment(asID, "/") if err != nil { klog.Errorf("as.getNodeAvailabilitySet - Node (%s)- getLastSegment(%s), err=%v", nodeName, asID, err) return nil, err @@ -680,7 +680,7 @@ func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName stri if err != nil { return network.Interface{}, err } - nicName, err := getLastSegment(primaryNicID) + nicName, err := getLastSegment(primaryNicID, "/") if err != nil { return network.Interface{}, err } diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss.go b/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss.go index f4d1ff6bea..c4fec3a940 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss.go +++ b/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss.go @@ -189,6 +189,16 @@ func (ss *scaleSet) getVmssVM(nodeName string, crt azcache.AzureCacheReadType) ( // GetPowerStatusByNodeName returns the power state of the specified node. func (ss *scaleSet) GetPowerStatusByNodeName(name string) (powerState string, err error) { + managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, azcache.CacheReadTypeUnsafe) + if err != nil { + klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err) + return "", err + } + if managedByAS { + // vm is managed by availability set. 
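// Illustrative aside, not part of the patch: getLastSegment after this change.
// Making the separator a parameter lets the same helper split both
// "/"-delimited ARM resource IDs and "_"-delimited VMSS instance names such as
// "k8s-agentpool-36841236-vmss_1"; the body matches the hunk in
// azure_standard.go.
package sketch

import (
	"fmt"
	"strings"
)

func getLastSegment(id, separator string) (string, error) {
	parts := strings.Split(id, separator)
	name := parts[len(parts)-1]
	if len(name) == 0 {
		return "", fmt.Errorf("resource name was missing from identifier")
	}
	return name, nil
}

// Example: getLastSegment("k8s-agentpool-36841236-vmss_1", "_") yields "1".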
+ return ss.availabilitySet.GetPowerStatusByNodeName(name) + } + _, _, vm, err := ss.getVmssVM(name, azcache.CacheReadTypeDefault) if err != nil { return powerState, err @@ -285,6 +295,11 @@ func (ss *scaleSet) GetInstanceIDByNodeName(name string) (string, error) { } // GetNodeNameByProviderID gets the node name by provider ID. +// providerID example: +// 1. vmas providerID: azure:///subscriptions/subsid/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-27053986-0 +// 2. vmss providerID: +// azure:///subscriptions/subsid/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-22126781-vmss/virtualMachines/1 +// /subscriptions/subsid/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-22126781-vmss/virtualMachines/k8s-agentpool-36841236-vmss_1 func (ss *scaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName, error) { // NodeName is not part of providerID for vmss instances. scaleSetName, err := extractScaleSetNameByProviderID(providerID) @@ -298,12 +313,20 @@ func (ss *scaleSet) GetNodeNameByProviderID(providerID string) (types.NodeName, return "", fmt.Errorf("error of extracting resource group for node %q", providerID) } - instanceID, err := getLastSegment(providerID) + instanceID, err := getLastSegment(providerID, "/") if err != nil { klog.V(4).Infof("Cannot extract instanceID from providerID (%s), assuming it is managed by availability set: %v", providerID, err) return ss.availabilitySet.GetNodeNameByProviderID(providerID) } + // instanceID contains scaleSetName (returned by disk.ManagedBy), e.g. k8s-agentpool-36841236-vmss_1 + if strings.HasPrefix(strings.ToLower(instanceID), strings.ToLower(scaleSetName)) { + instanceID, err = getLastSegment(instanceID, "_") + if err != nil { + return "", err + } + } + vm, err := ss.getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID, azcache.CacheReadTypeUnsafe) if err != nil { return "", err @@ -695,7 +718,7 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, err return network.Interface{}, err } - nicName, err := getLastSegment(primaryInterfaceID) + nicName, err := getLastSegment(primaryInterfaceID, "/") if err != nil { klog.Errorf("error: ss.GetPrimaryInterface(%s), getLastSegment(%s), err=%v", nodeName, primaryInterfaceID, err) return network.Interface{}, err } diff --git a/vendor/modules.txt b/vendor/modules.txt index 3f3c6e68f8..5914e1a582 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -415,8 +415,6 @@ github.com/docker/go-events github.com/docker/go-metrics # github.com/docker/go-units v0.4.0 github.com/docker/go-units -# github.com/docker/libnetwork v0.8.0-dev.2.0.20190925143933-c8a5fca4a652 => github.com/docker/libnetwork v0.8.0-dev.2.0.20190624125649-f0e46a78ea34 -github.com/docker/libnetwork/ipvs # github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 github.com/docker/spdystream github.com/docker/spdystream/spdy @@ -1142,7 +1140,7 @@ gopkg.in/square/go-jose.v2/jwt gopkg.in/warnings.v0 # gopkg.in/yaml.v2 v2.2.8 gopkg.in/yaml.v2 -# k8s.io/api v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.2-k3s.1 +# k8s.io/api v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.18.3-k3s1 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 k8s.io/api/admissionregistration/v1 @@ -1186,7 +1184,7 @@ k8s.io/api/settings/v1alpha1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apiextensions-apiserver v0.18.0 =>
github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.2-k3s.1 +# k8s.io/apiextensions-apiserver v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.18.3-k3s1 k8s.io/apiextensions-apiserver/pkg/apihelpers k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install @@ -1226,7 +1224,7 @@ k8s.io/apiextensions-apiserver/pkg/generated/openapi k8s.io/apiextensions-apiserver/pkg/registry/customresource k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition -# k8s.io/apimachinery v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.2-k3s.1 +# k8s.io/apimachinery v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.18.3-k3s1 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/meta @@ -1288,7 +1286,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.2-k3s.1 +# k8s.io/apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.18.3-k3s1 k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/configuration k8s.io/apiserver/pkg/admission/initializer @@ -1418,7 +1416,7 @@ k8s.io/apiserver/plugin/pkg/authenticator/request/basicauth k8s.io/apiserver/plugin/pkg/authenticator/token/oidc k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook -# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.2-k3s.1 +# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.18.3-k3s1 k8s.io/cli-runtime/pkg/genericclioptions k8s.io/cli-runtime/pkg/kustomize k8s.io/cli-runtime/pkg/kustomize/k8sdeps @@ -1431,7 +1429,7 @@ k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch k8s.io/cli-runtime/pkg/kustomize/k8sdeps/validator k8s.io/cli-runtime/pkg/printers k8s.io/cli-runtime/pkg/resource -# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.2-k3s.1 +# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.18.3-k3s1 k8s.io/client-go/discovery k8s.io/client-go/discovery/cached k8s.io/client-go/discovery/cached/disk @@ -1665,7 +1663,7 @@ k8s.io/client-go/util/jsonpath k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.2-k3s.1 +# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.18.3-k3s1 k8s.io/cloud-provider k8s.io/cloud-provider/api k8s.io/cloud-provider/node/helpers @@ -1673,13 +1671,13 @@ k8s.io/cloud-provider/service/helpers k8s.io/cloud-provider/volume k8s.io/cloud-provider/volume/errors k8s.io/cloud-provider/volume/helpers -# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.2-k3s.1 +# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.18.3-k3s1 
k8s.io/cluster-bootstrap/token/api k8s.io/cluster-bootstrap/token/jws k8s.io/cluster-bootstrap/token/util k8s.io/cluster-bootstrap/util/secrets k8s.io/cluster-bootstrap/util/tokens -# k8s.io/code-generator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.2-k3s.1 +# k8s.io/code-generator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.18.3-k3s1 k8s.io/code-generator/cmd/client-gen/args k8s.io/code-generator/cmd/client-gen/generators k8s.io/code-generator/cmd/client-gen/generators/fake @@ -1694,7 +1692,7 @@ k8s.io/code-generator/cmd/lister-gen/args k8s.io/code-generator/cmd/lister-gen/generators k8s.io/code-generator/pkg/namer k8s.io/code-generator/pkg/util -# k8s.io/component-base v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.2-k3s.1 +# k8s.io/component-base v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.18.3-k3s1 k8s.io/component-base/cli/flag k8s.io/component-base/cli/globalflag k8s.io/component-base/codec @@ -1712,10 +1710,10 @@ k8s.io/component-base/metrics/prometheus/workqueue k8s.io/component-base/metrics/testutil k8s.io/component-base/version k8s.io/component-base/version/verflag -# k8s.io/cri-api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.2-k3s.1 +# k8s.io/cri-api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.18.3-k3s1 k8s.io/cri-api/pkg/apis k8s.io/cri-api/pkg/apis/runtime/v1alpha2 -# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.2-k3s.1 +# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.18.3-k3s1 k8s.io/csi-translation-lib k8s.io/csi-translation-lib/plugins # k8s.io/gengo v0.0.0-20200114144118-36b2048a9120 @@ -1730,7 +1728,7 @@ k8s.io/gengo/types k8s.io/heapster/metrics/api/v1/types # k8s.io/klog v1.0.0 k8s.io/klog -# k8s.io/kube-aggregator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.2-k3s.1 +# k8s.io/kube-aggregator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.18.3-k3s1 k8s.io/kube-aggregator/pkg/apis/apiregistration k8s.io/kube-aggregator/pkg/apis/apiregistration/install k8s.io/kube-aggregator/pkg/apis/apiregistration/v1 @@ -1758,9 +1756,9 @@ k8s.io/kube-aggregator/pkg/controllers/status k8s.io/kube-aggregator/pkg/registry/apiservice k8s.io/kube-aggregator/pkg/registry/apiservice/etcd k8s.io/kube-aggregator/pkg/registry/apiservice/rest -# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.2-k3s.1 +# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.18.3-k3s1 k8s.io/kube-controller-manager/config/v1alpha1 -# k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c +# k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 k8s.io/kube-openapi/pkg/aggregator k8s.io/kube-openapi/pkg/builder k8s.io/kube-openapi/pkg/common @@ -1769,14 +1767,14 @@ k8s.io/kube-openapi/pkg/schemaconv k8s.io/kube-openapi/pkg/util k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/util/proto/validation -# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.2-k3s.1 +# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.18.3-k3s1 k8s.io/kube-proxy/config/v1alpha1 -# k8s.io/kube-scheduler v0.0.0 => 
github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.2-k3s.1 +# k8s.io/kube-scheduler v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.18.3-k3s1 k8s.io/kube-scheduler/config/v1 k8s.io/kube-scheduler/config/v1alpha1 k8s.io/kube-scheduler/config/v1alpha2 k8s.io/kube-scheduler/extender/v1 -# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.2-k3s.1 +# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.18.3-k3s1 k8s.io/kubectl/pkg/apps k8s.io/kubectl/pkg/cmd k8s.io/kubectl/pkg/cmd/annotate @@ -1851,11 +1849,11 @@ k8s.io/kubectl/pkg/util/storage k8s.io/kubectl/pkg/util/templates k8s.io/kubectl/pkg/util/term k8s.io/kubectl/pkg/validation -# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.2-k3s.1 +# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.18.3-k3s1 k8s.io/kubelet/config/v1beta1 k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1 k8s.io/kubelet/pkg/apis/pluginregistration/v1 -# k8s.io/kubernetes v1.18.0 => github.com/rancher/kubernetes v1.18.2-k3s.1 +# k8s.io/kubernetes v1.18.0 => github.com/rancher/kubernetes v1.18.3-k3s1 k8s.io/kubernetes/cmd/cloud-controller-manager/app k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config/scheme @@ -2598,7 +2596,8 @@ k8s.io/kubernetes/third_party/forked/gonum/graph k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear k8s.io/kubernetes/third_party/forked/gonum/graph/simple k8s.io/kubernetes/third_party/forked/gonum/graph/traverse -# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.2-k3s.1 +k8s.io/kubernetes/third_party/forked/ipvs +# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.18.3-k3s1 k8s.io/legacy-cloud-providers/aws k8s.io/legacy-cloud-providers/azure k8s.io/legacy-cloud-providers/azure/auth @@ -2629,7 +2628,7 @@ k8s.io/legacy-cloud-providers/openstack k8s.io/legacy-cloud-providers/vsphere k8s.io/legacy-cloud-providers/vsphere/vclib k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers -# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.2-k3s.1 +# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.18.3-k3s1 k8s.io/metrics/pkg/apis/custom_metrics k8s.io/metrics/pkg/apis/custom_metrics/v1beta1 k8s.io/metrics/pkg/apis/custom_metrics/v1beta2