mirror of
https://github.com/k3s-io/k3s.git
synced 2024-06-07 19:41:36 +00:00
Update Kubernetes to v1.19.3-k3s1
Signed-off-by: Brad Davidson <brad.davidson@rancher.com>
This commit is contained in:
parent
b1a7161ccc
commit
03f05f9337
52
go.mod
52
go.mod
@ -31,31 +31,31 @@ replace (
|
||||
google.golang.org/genproto => google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63
|
||||
google.golang.org/grpc => google.golang.org/grpc v1.26.0
|
||||
gopkg.in/square/go-jose.v2 => gopkg.in/square/go-jose.v2 v2.2.2
|
||||
k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.19.2-k3s1
|
||||
k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.19.2-k3s1
|
||||
k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.19.2-k3s1
|
||||
k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.19.2-k3s1
|
||||
k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.19.2-k3s1
|
||||
k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.19.2-k3s1
|
||||
k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.19.2-k3s1
|
||||
k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.19.2-k3s1
|
||||
k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.19.2-k3s1
|
||||
k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.19.2-k3s1
|
||||
k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.19.2-k3s1
|
||||
k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.19.2-k3s1
|
||||
k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.19.2-k3s1
|
||||
k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.19.2-k3s1
|
||||
k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.19.2-k3s1
|
||||
k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.19.2-k3s1
|
||||
k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.19.2-k3s1
|
||||
k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.19.2-k3s1
|
||||
k8s.io/kubernetes => github.com/rancher/kubernetes v1.19.2-k3s1
|
||||
k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.19.2-k3s1
|
||||
k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.19.2-k3s1
|
||||
k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.19.2-k3s1
|
||||
k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.19.2-k3s1
|
||||
k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.19.2-k3s1
|
||||
k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.19.2-k3s1
|
||||
k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.19.3-k3s1
|
||||
k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.19.3-k3s1
|
||||
k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.19.3-k3s1
|
||||
k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.19.3-k3s1
|
||||
k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.19.3-k3s1
|
||||
k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.19.3-k3s1
|
||||
k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.19.3-k3s1
|
||||
k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.19.3-k3s1
|
||||
k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.19.3-k3s1
|
||||
k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.19.3-k3s1
|
||||
k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.19.3-k3s1
|
||||
k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.19.3-k3s1
|
||||
k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.19.3-k3s1
|
||||
k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.19.3-k3s1
|
||||
k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.19.3-k3s1
|
||||
k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.19.3-k3s1
|
||||
k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.19.3-k3s1
|
||||
k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.19.3-k3s1
|
||||
k8s.io/kubernetes => github.com/rancher/kubernetes v1.19.3-k3s1
|
||||
k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.19.3-k3s1
|
||||
k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.19.3-k3s1
|
||||
k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.19.3-k3s1
|
||||
k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.19.3-k3s1
|
||||
k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.19.3-k3s1
|
||||
k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.19.3-k3s1
|
||||
mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34
|
||||
)
|
||||
|
||||
@ -113,6 +113,6 @@ require (
|
||||
k8s.io/component-base v0.19.0
|
||||
k8s.io/cri-api v0.19.0
|
||||
k8s.io/klog v1.0.0
|
||||
k8s.io/kubernetes v1.19.0
|
||||
k8s.io/kubernetes v1.19.3
|
||||
sigs.k8s.io/yaml v1.2.0
|
||||
)
|
||||
|
86
go.sum
86
go.sum
@ -679,49 +679,49 @@ github.com/rancher/helm-controller v0.7.3 h1:WTQHcNF2vl9w6Xd1eBtXDe0JUsYMFFstqX9
|
||||
github.com/rancher/helm-controller v0.7.3/go.mod h1:ZylsxIMGNADRPRNW+NiBWhrwwks9vnKLQiCHYWb6Bi0=
|
||||
github.com/rancher/kine v0.4.1 h1:CPtGDXsov5t5onXwhZ97VBpaxDoj1MBHeQwB0TSrUu8=
|
||||
github.com/rancher/kine v0.4.1/go.mod h1:IImtCJ68AIkE+VY/kUI0NkyJL5q5WzO8QvMsSXqbrpA=
|
||||
github.com/rancher/kubernetes v1.19.2-k3s1 h1:/oTv57BwDcf8kapnr1ViYH98Fwk3vnklWmQdlI3vJE0=
|
||||
github.com/rancher/kubernetes v1.19.2-k3s1/go.mod h1:yhT1/ltQajQsha3tnYc9QPFYSumGM45nlZdjf7WqE1A=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/api v1.19.2-k3s1 h1:OPBCfsjKfgMaMt0mtWaoy+IirLeD+/CeVxoHXdP5bTE=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/api v1.19.2-k3s1/go.mod h1:Y4VjjNur38HL6/QxaTVK2yno1zjEQlvcvwbbRQs2DtQ=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.19.2-k3s1 h1:Foybkoz0lZTEQ2+lm8uDDeLeoNAkluH7C9qAedH1QGc=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.19.2-k3s1/go.mod h1:BvtZU215FgO19Oy19K6h8qwajFfjxYqGewgjuYHWGRw=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.19.2-k3s1 h1:3NeqAdnmjjaznqJ5Pvxr9co1KeIXwLwh2D8V5iAn2OA=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.19.2-k3s1/go.mod h1:4qgwPPTQvmc3E4Ub+c6I9LSsdbujYP3pIQEGuIVy8oQ=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.19.2-k3s1 h1:n/WaLWgsxyGXZhXatZhr/IkgjKtA1EzMkb5BzkrdTi0=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.19.2-k3s1/go.mod h1:lpK+uXhJTVOwW6SDiSQiL0LaQaBktrM23VG489uC/U0=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.19.2-k3s1 h1:LYerVtsgYxss4A0sKIS3j2N+2xu20oXCMdbsCkBTUpI=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.19.2-k3s1/go.mod h1:twd45pbv7psOvyGxI8eABhpeoXWW3bCX6aB5NVS6TXs=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.19.2-k3s1 h1:jHN3qrkSUsukX1sTpxzTAUq1MEC46AQuHIMcnG7lkrE=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.19.2-k3s1/go.mod h1:RHmuC9yMUmS4ypsaBCH2s9PoG2BD29/60QU9yywYWPo=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.19.2-k3s1 h1:uNpw/9zA3wqbs/dzI/4LsH5NkNcDrRwx9J8FldflN4E=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.19.2-k3s1/go.mod h1:vXBe7m69RSxIR/m6bm820O+WYUJHP9OXtEyTZf3twRo=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.19.2-k3s1 h1:6lvc7kwoDwEEvA7rowR3Iuih1B0bqjbnX/von8qEJ4s=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.19.2-k3s1/go.mod h1:kEUR4nHaGTacDcHrY2P4IriUdykXxPuwBg7picxa+gk=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.19.2-k3s1 h1:9C7g/2xASpW1g3DrUwtEgN5HfvJfG1PWAnPFeiVufs0=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.19.2-k3s1/go.mod h1:CqfZDv+BcdomYCAbV0kiIw9wyBW1Fnf/iDHBd0jEWW8=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.19.2-k3s1 h1:SraoflBbM/qWSgyWbZhc17yjTVfpVum98RQFd8+nvLk=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.19.2-k3s1/go.mod h1:jR+bJp7erYNUmcS7lWDm404aFVonltWE56LV8CuqKyg=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.19.2-k3s1 h1:dwa2fItb65xx79MBe4MfKxsXJEzNvRX9m5uaT+73OG4=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.19.2-k3s1/go.mod h1:5fTlfaqG0oYYb80kk4fHpqinBBSFEZb3EqHwdvNdiGQ=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.19.2-k3s1 h1:jNtAgLZbbfxNgaGcpElOQn2iYqme6+Am8wn+aS711gU=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.19.2-k3s1/go.mod h1:adA9bBHXD+K7tPn7kTpjQ3VcUzK6PFgAdNEofr4fEx4=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.19.2-k3s1 h1:Annj5Do0vlwBPxYLJalo62SSYOIMddfV9K8SFjtLG7Q=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.19.2-k3s1/go.mod h1:2k5w33TDDiX5MW45QFLk1KqByb4Su/g73InDkoC1xls=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.19.2-k3s1 h1:hZp0vL1vvN3yeBErM9KGvqN1WZPCklO5/N8IWEJ0UDw=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.19.2-k3s1/go.mod h1:6CzB8GMVD+ZlCXZjHiqccHpC9FFlRTCz+rHd176Lxfc=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.19.2-k3s1 h1:zLmsbM2shEuTesMP0XrS4bTuG3VcsAQZQyirkPTxsO8=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.19.2-k3s1/go.mod h1:PWMBqO9xuXWJS8REJ8QWiouJzbiOwVVVT81ZTwYb2Nk=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.19.2-k3s1 h1:kVVCYYK3rFdkRmBX6Ar9Pec7yDxoKDm9so7dNasfFuw=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.19.2-k3s1/go.mod h1:23iT4D9thFRxYGZ9DOa7zQ4ZyrFHyp+Bz5dXiruHNwI=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.19.2-k3s1 h1:tiX4cJnLYUFmMJ1EmagroblJS9DY3x7656kFCNB96m0=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.19.2-k3s1/go.mod h1:jF57QafZUIyhkboHp3NUlPdpozl8NA4uvyG5FdjbEbU=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.19.2-k3s1 h1:gjzp1CdOqhn28eHZosNQ/9FUGTuDfD4cRmy+wgTurxQ=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.19.2-k3s1/go.mod h1:xgsxxK8T8eljLwrh87vAyytjDFMbBsrM95vgHrf1OUg=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.19.2-k3s1 h1:V75TONButlOX+CbFk2bTYJFqNP3jvMim88IgOUo9OxA=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.19.2-k3s1/go.mod h1:iv4u51XYDkRdyvp7BBP+KuQ+ZHjEjoCECFVzMJBDGZA=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.19.2-k3s1 h1:jSY7QHwG+/rLAetWY9QI/oNdgTrWF8ue6i+MZ53N8EE=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.19.2-k3s1/go.mod h1:HZHgmugH8oA4ZxTPt9R8HYJBWEeLDegmaGoPo3HDK3I=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.19.2-k3s1/go.mod h1:tl3x2SPSVsIqnioD4z87jXFemilbRh1EYDm3KirMsjI=
|
||||
github.com/rancher/kubernetes v1.19.3-k3s1 h1:Tfr1qShnWaNGx4kyBSW5A9rvISgHjEg0KRvvZIV5Zpc=
|
||||
github.com/rancher/kubernetes v1.19.3-k3s1/go.mod h1:yhT1/ltQajQsha3tnYc9QPFYSumGM45nlZdjf7WqE1A=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/api v1.19.3-k3s1 h1:+C1BPPjbCfFFcStBNUJ1gqIDYxdkvbKuZXm3CTQXFxY=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/api v1.19.3-k3s1/go.mod h1:Y4VjjNur38HL6/QxaTVK2yno1zjEQlvcvwbbRQs2DtQ=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.19.3-k3s1 h1:+KJuGNziYsqEW83VADyz9kjc+ekmpktzqdPYnEmxtss=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.19.3-k3s1/go.mod h1:BvtZU215FgO19Oy19K6h8qwajFfjxYqGewgjuYHWGRw=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.19.3-k3s1 h1:2NbzNTnTh8I8emvASDspv4dPwUBmbAC7aitpuYp32rM=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.19.3-k3s1/go.mod h1:4qgwPPTQvmc3E4Ub+c6I9LSsdbujYP3pIQEGuIVy8oQ=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.19.3-k3s1 h1:a6syigjFFawAUcEFeeXhRwaBG9Tl03hK4yC3MT6JUmQ=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.19.3-k3s1/go.mod h1:lpK+uXhJTVOwW6SDiSQiL0LaQaBktrM23VG489uC/U0=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.19.3-k3s1 h1:oh0VerzIG3uYnFLExwHnMYK0GF5O7vVQWZAi7FDE0x4=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.19.3-k3s1/go.mod h1:twd45pbv7psOvyGxI8eABhpeoXWW3bCX6aB5NVS6TXs=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.19.3-k3s1 h1:LBUD+DgcZsFqKzQtm4LLAj/e4qaPVepQNGT4PGCcFl0=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.19.3-k3s1/go.mod h1:RHmuC9yMUmS4ypsaBCH2s9PoG2BD29/60QU9yywYWPo=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.19.3-k3s1 h1:qEJ4M89Ov58KQnE9fJbiwa6423BQ2rdECc8+raY86jw=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.19.3-k3s1/go.mod h1:vXBe7m69RSxIR/m6bm820O+WYUJHP9OXtEyTZf3twRo=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.19.3-k3s1 h1:iOMP0NuFsbKZN1ndPQE+We9ssgUevqyJ/8mMDUiGrJM=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.19.3-k3s1/go.mod h1:kEUR4nHaGTacDcHrY2P4IriUdykXxPuwBg7picxa+gk=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.19.3-k3s1 h1:S3BTDilXxRnRpPGYMYIocfFNXyVpHREhmy8eJYIz6dk=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.19.3-k3s1/go.mod h1:CqfZDv+BcdomYCAbV0kiIw9wyBW1Fnf/iDHBd0jEWW8=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.19.3-k3s1 h1:tMyMGhwLnL3mimx9YN8T6uuPAZyv0hmYOdTgWG3Xi04=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.19.3-k3s1/go.mod h1:jR+bJp7erYNUmcS7lWDm404aFVonltWE56LV8CuqKyg=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.19.3-k3s1 h1:1eeJYqrjk28d+60wR+5/tmrAzz2OybjohX1T9QRFVwg=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.19.3-k3s1/go.mod h1:5fTlfaqG0oYYb80kk4fHpqinBBSFEZb3EqHwdvNdiGQ=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.19.3-k3s1 h1:RxVX1nPPdVfZVP/Viz3n1rd9/zU6CQICAhfNgDwNzN4=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.19.3-k3s1/go.mod h1:adA9bBHXD+K7tPn7kTpjQ3VcUzK6PFgAdNEofr4fEx4=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.19.3-k3s1 h1:ZMQsAv6ib3rOvRS0eeuG7IpNfjH6V4Jb0Xt3rwFo/5M=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.19.3-k3s1/go.mod h1:2k5w33TDDiX5MW45QFLk1KqByb4Su/g73InDkoC1xls=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.19.3-k3s1 h1:ceFawy3+DdCrTCuIr90VJU+ALvnUdOxTewgngJWXkmI=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.19.3-k3s1/go.mod h1:6CzB8GMVD+ZlCXZjHiqccHpC9FFlRTCz+rHd176Lxfc=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.19.3-k3s1 h1:xwYRs1y6M993NcixnQPlv8p++T7mzCUGGcMuH0GBOh8=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.19.3-k3s1/go.mod h1:PWMBqO9xuXWJS8REJ8QWiouJzbiOwVVVT81ZTwYb2Nk=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.19.3-k3s1 h1:gv249AZOI7rNXj0ODJB2sqwirYG4+4Rczp3RIltGwWc=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.19.3-k3s1/go.mod h1:23iT4D9thFRxYGZ9DOa7zQ4ZyrFHyp+Bz5dXiruHNwI=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.19.3-k3s1 h1:/YLM38Ve+OsLaVwli6uSn/CnisDRYS6KmDBMO/IC4nM=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.19.3-k3s1/go.mod h1:jF57QafZUIyhkboHp3NUlPdpozl8NA4uvyG5FdjbEbU=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.19.3-k3s1 h1:nQN+E86Ar9omkk3fSXHQREEpAW/lUHAECzU8jxy3iyM=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.19.3-k3s1/go.mod h1:xgsxxK8T8eljLwrh87vAyytjDFMbBsrM95vgHrf1OUg=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.19.3-k3s1 h1:9q1Ek7H0/ESteqgdeAYvnonnNeKfii8xrD7eHtLcR9M=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.19.3-k3s1/go.mod h1:iv4u51XYDkRdyvp7BBP+KuQ+ZHjEjoCECFVzMJBDGZA=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.19.3-k3s1 h1:doNDlT3AR7qoN0l3DOqSQeXhZ1si7lKsa/smvPlXX20=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.19.3-k3s1/go.mod h1:HZHgmugH8oA4ZxTPt9R8HYJBWEeLDegmaGoPo3HDK3I=
|
||||
github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.19.3-k3s1/go.mod h1:tl3x2SPSVsIqnioD4z87jXFemilbRh1EYDm3KirMsjI=
|
||||
github.com/rancher/moq v0.0.0-20190404221404-ee5226d43009/go.mod h1:wpITyDPTi/Na5h73XkbuEf2AP9fbgrIGqqxVzFhYD6U=
|
||||
github.com/rancher/nocode v0.0.0-20200630202308-cb097102c09f/go.mod h1:iAAt6Amgbysi6srDJs9SxGSbG2j/JSRb/xCrnEtA69g=
|
||||
github.com/rancher/remotedialer v0.2.0 h1:xD7t3K6JYwTdAsxmGtTHQMkEkFgKouQ1foLxVW424Dc=
|
||||
|
16
vendor/k8s.io/api/apps/v1beta1/types.go
generated
vendored
16
vendor/k8s.io/api/apps/v1beta1/types.go
generated
vendored
@ -58,7 +58,7 @@ type ScaleStatus struct {
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.6
|
||||
// +k8s:prerelease-lifecycle-gen:deprecated=1.8
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.18
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.16
|
||||
// +k8s:prerelease-lifecycle-gen:replacement=autoscaling,v1,Scale
|
||||
|
||||
// Scale represents a scaling request for a resource.
|
||||
@ -81,7 +81,7 @@ type Scale struct {
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.5
|
||||
// +k8s:prerelease-lifecycle-gen:deprecated=1.8
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.18
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.16
|
||||
// +k8s:prerelease-lifecycle-gen:replacement=apps,v1,StatefulSet
|
||||
|
||||
// DEPRECATED - This group version of StatefulSet is deprecated by apps/v1beta2/StatefulSet. See the release notes for
|
||||
@ -284,7 +284,7 @@ type StatefulSetCondition struct {
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.5
|
||||
// +k8s:prerelease-lifecycle-gen:deprecated=1.8
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.18
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.16
|
||||
// +k8s:prerelease-lifecycle-gen:replacement=apps,v1,StatefulSetList
|
||||
|
||||
// StatefulSetList is a collection of StatefulSets.
|
||||
@ -299,7 +299,7 @@ type StatefulSetList struct {
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.6
|
||||
// +k8s:prerelease-lifecycle-gen:deprecated=1.8
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.18
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.16
|
||||
// +k8s:prerelease-lifecycle-gen:replacement=apps,v1,Deployment
|
||||
|
||||
// DEPRECATED - This group version of Deployment is deprecated by apps/v1beta2/Deployment. See the release notes for
|
||||
@ -373,7 +373,7 @@ type DeploymentSpec struct {
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.6
|
||||
// +k8s:prerelease-lifecycle-gen:deprecated=1.8
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.18
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.16
|
||||
// +k8s:prerelease-lifecycle-gen:replacement=apps,v1,DeploymentRollback
|
||||
|
||||
// DEPRECATED.
|
||||
@ -534,7 +534,7 @@ type DeploymentCondition struct {
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.6
|
||||
// +k8s:prerelease-lifecycle-gen:deprecated=1.8
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.18
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.16
|
||||
// +k8s:prerelease-lifecycle-gen:replacement=apps,v1,DeploymentList
|
||||
|
||||
// DeploymentList is a list of Deployments.
|
||||
@ -552,7 +552,7 @@ type DeploymentList struct {
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.7
|
||||
// +k8s:prerelease-lifecycle-gen:deprecated=1.8
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.18
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.16
|
||||
// +k8s:prerelease-lifecycle-gen:replacement=apps,v1,ControllerRevision
|
||||
|
||||
// DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1beta2/ControllerRevision. See the
|
||||
@ -583,7 +583,7 @@ type ControllerRevision struct {
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.7
|
||||
// +k8s:prerelease-lifecycle-gen:deprecated=1.8
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.18
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.16
|
||||
// +k8s:prerelease-lifecycle-gen:replacement=apps,v1,ControllerRevisionList
|
||||
|
||||
// ControllerRevisionList is a resource containing a list of ControllerRevision objects.
|
||||
|
16
vendor/k8s.io/api/apps/v1beta1/zz_generated.prerelease-lifecycle.go
generated
vendored
16
vendor/k8s.io/api/apps/v1beta1/zz_generated.prerelease-lifecycle.go
generated
vendored
@ -45,7 +45,7 @@ func (in *ControllerRevision) APILifecycleReplacement() schema.GroupVersionKind
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *ControllerRevision) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 18
|
||||
return 1, 16
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
@ -69,7 +69,7 @@ func (in *ControllerRevisionList) APILifecycleReplacement() schema.GroupVersionK
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *ControllerRevisionList) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 18
|
||||
return 1, 16
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
@ -93,7 +93,7 @@ func (in *Deployment) APILifecycleReplacement() schema.GroupVersionKind {
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *Deployment) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 18
|
||||
return 1, 16
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
@ -117,7 +117,7 @@ func (in *DeploymentList) APILifecycleReplacement() schema.GroupVersionKind {
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *DeploymentList) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 18
|
||||
return 1, 16
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
@ -141,7 +141,7 @@ func (in *DeploymentRollback) APILifecycleReplacement() schema.GroupVersionKind
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *DeploymentRollback) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 18
|
||||
return 1, 16
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
@ -165,7 +165,7 @@ func (in *Scale) APILifecycleReplacement() schema.GroupVersionKind {
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *Scale) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 18
|
||||
return 1, 16
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
@ -189,7 +189,7 @@ func (in *StatefulSet) APILifecycleReplacement() schema.GroupVersionKind {
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *StatefulSet) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 18
|
||||
return 1, 16
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
@ -213,5 +213,5 @@ func (in *StatefulSetList) APILifecycleReplacement() schema.GroupVersionKind {
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *StatefulSetList) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 18
|
||||
return 1, 16
|
||||
}
|
||||
|
22
vendor/k8s.io/api/apps/v1beta2/types.go
generated
vendored
22
vendor/k8s.io/api/apps/v1beta2/types.go
generated
vendored
@ -60,7 +60,7 @@ type ScaleStatus struct {
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.8
|
||||
// +k8s:prerelease-lifecycle-gen:deprecated=1.9
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.18
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.16
|
||||
// +k8s:prerelease-lifecycle-gen:replacement=autoscaling,v1,Scale
|
||||
|
||||
// Scale represents a scaling request for a resource.
|
||||
@ -85,7 +85,7 @@ type Scale struct {
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.8
|
||||
// +k8s:prerelease-lifecycle-gen:deprecated=1.9
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.18
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.16
|
||||
// +k8s:prerelease-lifecycle-gen:replacement=apps,v1,StatefulSet
|
||||
|
||||
// DEPRECATED - This group version of StatefulSet is deprecated by apps/v1/StatefulSet. See the release notes for
|
||||
@ -292,7 +292,7 @@ type StatefulSetCondition struct {
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.8
|
||||
// +k8s:prerelease-lifecycle-gen:deprecated=1.9
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.18
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.16
|
||||
// +k8s:prerelease-lifecycle-gen:replacement=apps,v1,StatefulSetList
|
||||
|
||||
// StatefulSetList is a collection of StatefulSets.
|
||||
@ -307,7 +307,7 @@ type StatefulSetList struct {
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.8
|
||||
// +k8s:prerelease-lifecycle-gen:deprecated=1.9
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.18
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.16
|
||||
// +k8s:prerelease-lifecycle-gen:replacement=apps,v1,Deployment
|
||||
|
||||
// DEPRECATED - This group version of Deployment is deprecated by apps/v1/Deployment. See the release notes for
|
||||
@ -510,7 +510,7 @@ type DeploymentCondition struct {
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.8
|
||||
// +k8s:prerelease-lifecycle-gen:deprecated=1.9
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.18
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.16
|
||||
// +k8s:prerelease-lifecycle-gen:replacement=apps,v1,DeploymentList
|
||||
|
||||
// DeploymentList is a list of Deployments.
|
||||
@ -681,7 +681,7 @@ type DaemonSetCondition struct {
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.8
|
||||
// +k8s:prerelease-lifecycle-gen:deprecated=1.9
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.18
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.16
|
||||
// +k8s:prerelease-lifecycle-gen:replacement=apps,v1,DaemonSet
|
||||
|
||||
// DEPRECATED - This group version of DaemonSet is deprecated by apps/v1/DaemonSet. See the release notes for
|
||||
@ -718,7 +718,7 @@ const (
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.8
|
||||
// +k8s:prerelease-lifecycle-gen:deprecated=1.9
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.18
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.16
|
||||
// +k8s:prerelease-lifecycle-gen:replacement=apps,v1,DaemonSetList
|
||||
|
||||
// DaemonSetList is a collection of daemon sets.
|
||||
@ -737,7 +737,7 @@ type DaemonSetList struct {
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.8
|
||||
// +k8s:prerelease-lifecycle-gen:deprecated=1.9
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.18
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.16
|
||||
// +k8s:prerelease-lifecycle-gen:replacement=apps,v1,ReplicaSet
|
||||
|
||||
// DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1/ReplicaSet. See the release notes for
|
||||
@ -769,7 +769,7 @@ type ReplicaSet struct {
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.8
|
||||
// +k8s:prerelease-lifecycle-gen:deprecated=1.9
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.18
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.16
|
||||
// +k8s:prerelease-lifecycle-gen:replacement=apps,v1,ReplicaSetList
|
||||
|
||||
// ReplicaSetList is a collection of ReplicaSets.
|
||||
@ -873,7 +873,7 @@ type ReplicaSetCondition struct {
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.8
|
||||
// +k8s:prerelease-lifecycle-gen:deprecated=1.9
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.18
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.16
|
||||
// +k8s:prerelease-lifecycle-gen:replacement=apps,v1,ControllerRevision
|
||||
|
||||
// DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1/ControllerRevision. See the
|
||||
@ -904,7 +904,7 @@ type ControllerRevision struct {
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.8
|
||||
// +k8s:prerelease-lifecycle-gen:deprecated=1.9
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.18
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.16
|
||||
// +k8s:prerelease-lifecycle-gen:replacement=apps,v1,ControllerRevisionList
|
||||
|
||||
// ControllerRevisionList is a resource containing a list of ControllerRevision objects.
|
||||
|
22
vendor/k8s.io/api/apps/v1beta2/zz_generated.prerelease-lifecycle.go
generated
vendored
22
vendor/k8s.io/api/apps/v1beta2/zz_generated.prerelease-lifecycle.go
generated
vendored
@ -45,7 +45,7 @@ func (in *ControllerRevision) APILifecycleReplacement() schema.GroupVersionKind
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *ControllerRevision) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 18
|
||||
return 1, 16
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
@ -69,7 +69,7 @@ func (in *ControllerRevisionList) APILifecycleReplacement() schema.GroupVersionK
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *ControllerRevisionList) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 18
|
||||
return 1, 16
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
@ -93,7 +93,7 @@ func (in *DaemonSet) APILifecycleReplacement() schema.GroupVersionKind {
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *DaemonSet) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 18
|
||||
return 1, 16
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
@ -117,7 +117,7 @@ func (in *DaemonSetList) APILifecycleReplacement() schema.GroupVersionKind {
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *DaemonSetList) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 18
|
||||
return 1, 16
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
@ -141,7 +141,7 @@ func (in *Deployment) APILifecycleReplacement() schema.GroupVersionKind {
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *Deployment) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 18
|
||||
return 1, 16
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
@ -165,7 +165,7 @@ func (in *DeploymentList) APILifecycleReplacement() schema.GroupVersionKind {
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *DeploymentList) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 18
|
||||
return 1, 16
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
@ -189,7 +189,7 @@ func (in *ReplicaSet) APILifecycleReplacement() schema.GroupVersionKind {
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *ReplicaSet) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 18
|
||||
return 1, 16
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
@ -213,7 +213,7 @@ func (in *ReplicaSetList) APILifecycleReplacement() schema.GroupVersionKind {
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *ReplicaSetList) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 18
|
||||
return 1, 16
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
@ -237,7 +237,7 @@ func (in *Scale) APILifecycleReplacement() schema.GroupVersionKind {
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *Scale) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 18
|
||||
return 1, 16
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
@ -261,7 +261,7 @@ func (in *StatefulSet) APILifecycleReplacement() schema.GroupVersionKind {
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *StatefulSet) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 18
|
||||
return 1, 16
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
@ -285,5 +285,5 @@ func (in *StatefulSetList) APILifecycleReplacement() schema.GroupVersionKind {
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *StatefulSetList) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 18
|
||||
return 1, 16
|
||||
}
|
||||
|
24
vendor/k8s.io/api/extensions/v1beta1/types.go
generated
vendored
24
vendor/k8s.io/api/extensions/v1beta1/types.go
generated
vendored
@ -52,7 +52,7 @@ type ScaleStatus struct {
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.1
|
||||
// +k8s:prerelease-lifecycle-gen:deprecated=1.2
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.18
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.16
|
||||
|
||||
// represents a scaling request for a resource.
|
||||
type Scale struct {
|
||||
@ -76,7 +76,7 @@ type Scale struct {
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.1
|
||||
// +k8s:prerelease-lifecycle-gen:deprecated=1.8
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.18
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.16
|
||||
// +k8s:prerelease-lifecycle-gen:replacement=apps,v1,Deployment
|
||||
|
||||
// DEPRECATED - This group version of Deployment is deprecated by apps/v1beta2/Deployment. See the release notes for
|
||||
@ -153,7 +153,7 @@ type DeploymentSpec struct {
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.2
|
||||
// +k8s:prerelease-lifecycle-gen:deprecated=1.8
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.18
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.16
|
||||
|
||||
// DEPRECATED.
|
||||
// DeploymentRollback stores the information required to rollback a deployment.
|
||||
@ -313,7 +313,7 @@ type DeploymentCondition struct {
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.1
|
||||
// +k8s:prerelease-lifecycle-gen:deprecated=1.8
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.18
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.16
|
||||
// +k8s:prerelease-lifecycle-gen:replacement=apps,v1,DeploymentList
|
||||
|
||||
// DeploymentList is a list of Deployments.
|
||||
@ -491,7 +491,7 @@ type DaemonSetCondition struct {
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.1
|
||||
// +k8s:prerelease-lifecycle-gen:deprecated=1.8
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.18
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.16
|
||||
// +k8s:prerelease-lifecycle-gen:replacement=apps,v1,DaemonSet
|
||||
|
||||
// DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for
|
||||
@ -534,7 +534,7 @@ const (
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.1
|
||||
// +k8s:prerelease-lifecycle-gen:deprecated=1.8
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.18
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.16
|
||||
// +k8s:prerelease-lifecycle-gen:replacement=apps,v1,DaemonSetList
|
||||
|
||||
// DaemonSetList is a collection of daemon sets.
|
||||
@ -808,7 +808,7 @@ type IngressBackend struct {
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.2
|
||||
// +k8s:prerelease-lifecycle-gen:deprecated=1.8
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.18
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.16
|
||||
// +k8s:prerelease-lifecycle-gen:replacement=apps,v1,ReplicaSet
|
||||
|
||||
// DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for
|
||||
@ -840,7 +840,7 @@ type ReplicaSet struct {
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.2
|
||||
// +k8s:prerelease-lifecycle-gen:deprecated=1.8
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.18
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.16
|
||||
// +k8s:prerelease-lifecycle-gen:replacement=apps,v1,ReplicaSetList
|
||||
|
||||
// ReplicaSetList is a collection of ReplicaSets.
|
||||
@ -946,7 +946,7 @@ type ReplicaSetCondition struct {
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.2
|
||||
// +k8s:prerelease-lifecycle-gen:deprecated=1.11
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.18
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.16
|
||||
// +k8s:prerelease-lifecycle-gen:replacement=policy,v1beta1,PodSecurityPolicy
|
||||
|
||||
// PodSecurityPolicy governs the ability to make requests that affect the Security Context
|
||||
@ -1308,7 +1308,7 @@ const AllowAllRuntimeClassNames = "*"
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.2
|
||||
// +k8s:prerelease-lifecycle-gen:deprecated=1.11
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.18
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.16
|
||||
// +k8s:prerelease-lifecycle-gen:replacement=policy,v1beta1,PodSecurityPolicyList
|
||||
|
||||
// PodSecurityPolicyList is a list of PodSecurityPolicy objects.
|
||||
@ -1328,7 +1328,7 @@ type PodSecurityPolicyList struct {
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.3
|
||||
// +k8s:prerelease-lifecycle-gen:deprecated=1.9
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.18
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.16
|
||||
// +k8s:prerelease-lifecycle-gen:replacement=networking.k8s.io,v1,NetworkPolicy
|
||||
|
||||
// DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy.
|
||||
@ -1502,7 +1502,7 @@ type NetworkPolicyPeer struct {
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.3
|
||||
// +k8s:prerelease-lifecycle-gen:deprecated=1.9
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.18
|
||||
// +k8s:prerelease-lifecycle-gen:removed=1.16
|
||||
// +k8s:prerelease-lifecycle-gen:replacement=networking.k8s.io,v1,NetworkPolicyList
|
||||
|
||||
// DEPRECATED 1.9 - This group version of NetworkPolicyList is deprecated by networking/v1/NetworkPolicyList.
|
||||
|
24
vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go
generated
vendored
24
vendor/k8s.io/api/extensions/v1beta1/zz_generated.prerelease-lifecycle.go
generated
vendored
@ -45,7 +45,7 @@ func (in *DaemonSet) APILifecycleReplacement() schema.GroupVersionKind {
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *DaemonSet) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 18
|
||||
return 1, 16
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
@ -69,7 +69,7 @@ func (in *DaemonSetList) APILifecycleReplacement() schema.GroupVersionKind {
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *DaemonSetList) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 18
|
||||
return 1, 16
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
@ -93,7 +93,7 @@ func (in *Deployment) APILifecycleReplacement() schema.GroupVersionKind {
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *Deployment) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 18
|
||||
return 1, 16
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
@ -117,7 +117,7 @@ func (in *DeploymentList) APILifecycleReplacement() schema.GroupVersionKind {
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *DeploymentList) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 18
|
||||
return 1, 16
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
@ -135,7 +135,7 @@ func (in *DeploymentRollback) APILifecycleDeprecated() (major, minor int) {
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *DeploymentRollback) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 18
|
||||
return 1, 16
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
@ -207,7 +207,7 @@ func (in *NetworkPolicy) APILifecycleReplacement() schema.GroupVersionKind {
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *NetworkPolicy) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 18
|
||||
return 1, 16
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
@ -231,7 +231,7 @@ func (in *NetworkPolicyList) APILifecycleReplacement() schema.GroupVersionKind {
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *NetworkPolicyList) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 18
|
||||
return 1, 16
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
@ -255,7 +255,7 @@ func (in *PodSecurityPolicy) APILifecycleReplacement() schema.GroupVersionKind {
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *PodSecurityPolicy) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 18
|
||||
return 1, 16
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
@ -279,7 +279,7 @@ func (in *PodSecurityPolicyList) APILifecycleReplacement() schema.GroupVersionKi
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *PodSecurityPolicyList) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 18
|
||||
return 1, 16
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
@ -303,7 +303,7 @@ func (in *ReplicaSet) APILifecycleReplacement() schema.GroupVersionKind {
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *ReplicaSet) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 18
|
||||
return 1, 16
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
@ -327,7 +327,7 @@ func (in *ReplicaSetList) APILifecycleReplacement() schema.GroupVersionKind {
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *ReplicaSetList) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 18
|
||||
return 1, 16
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
@ -345,5 +345,5 @@ func (in *Scale) APILifecycleDeprecated() (major, minor int) {
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *Scale) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 18
|
||||
return 1, 16
|
||||
}
|
||||
|
8
vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go
generated
vendored
8
vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go
generated
vendored
@ -452,6 +452,14 @@ func getNewItemFunc(listObj runtime.Object, v reflect.Value) func() runtime.Obje
|
||||
|
||||
func (s *store) Count(key string) (int64, error) {
|
||||
key = path.Join(s.pathPrefix, key)
|
||||
|
||||
// We need to make sure the key ended with "/" so that we only get children "directories".
|
||||
// e.g. if we have key "/a", "/a/b", "/ab", getting keys with prefix "/a" will return all three,
|
||||
// while with prefix "/a/" will return only "/a/b" which is the correct answer.
|
||||
if !strings.HasSuffix(key, "/") {
|
||||
key += "/"
|
||||
}
|
||||
|
||||
startTime := time.Now()
|
||||
getResp, err := s.client.KV.Get(context.Background(), key, clientv3.WithRange(clientv3.GetPrefixRangeEnd(key)), clientv3.WithCountOnly())
|
||||
metrics.RecordEtcdRequestLatency("listWithCount", key, startTime)
|
||||
|
6
vendor/k8s.io/client-go/pkg/version/base.go
generated
vendored
6
vendor/k8s.io/client-go/pkg/version/base.go
generated
vendored
@ -3,8 +3,8 @@ package version
|
||||
var (
|
||||
gitMajor = "1"
|
||||
gitMinor = "19"
|
||||
gitVersion = "v1.19.2-k3s1"
|
||||
gitCommit = "fea2193171edbfb530d85448c11f58591ad7825c"
|
||||
gitVersion = "v1.19.3-k3s1"
|
||||
gitCommit = "a38be968fd7010255807381f6d846c4082a838db"
|
||||
gitTreeState = "clean"
|
||||
buildDate = "2020-09-17T19:22:02Z"
|
||||
buildDate = "2020-10-14T17:54:23Z"
|
||||
)
|
||||
|
6
vendor/k8s.io/component-base/version/base.go
generated
vendored
6
vendor/k8s.io/component-base/version/base.go
generated
vendored
@ -3,8 +3,8 @@ package version
|
||||
var (
|
||||
gitMajor = "1"
|
||||
gitMinor = "19"
|
||||
gitVersion = "v1.19.2-k3s1"
|
||||
gitCommit = "fea2193171edbfb530d85448c11f58591ad7825c"
|
||||
gitVersion = "v1.19.3-k3s1"
|
||||
gitCommit = "a38be968fd7010255807381f6d846c4082a838db"
|
||||
gitTreeState = "clean"
|
||||
buildDate = "2020-09-17T19:22:02Z"
|
||||
buildDate = "2020-10-14T17:54:23Z"
|
||||
)
|
||||
|
9
vendor/k8s.io/csi-translation-lib/plugins/azure_file.go
generated
vendored
9
vendor/k8s.io/csi-translation-lib/plugins/azure_file.go
generated
vendored
@ -119,8 +119,10 @@ func (t *azureFileCSITranslator) TranslateInTreePVToCSI(pv *v1.PersistentVolume)
|
||||
accountName = azureSource.SecretName
|
||||
}
|
||||
resourceGroup := ""
|
||||
if v, ok := pv.ObjectMeta.Annotations[resourceGroupAnnotation]; ok {
|
||||
resourceGroup = v
|
||||
if pv.ObjectMeta.Annotations != nil {
|
||||
if v, ok := pv.ObjectMeta.Annotations[resourceGroupAnnotation]; ok {
|
||||
resourceGroup = v
|
||||
}
|
||||
}
|
||||
volumeID := fmt.Sprintf(volumeIDTemplate, resourceGroup, accountName, azureSource.ShareName, "")
|
||||
|
||||
@ -183,6 +185,9 @@ func (t *azureFileCSITranslator) TranslateCSIPVToInTree(pv *v1.PersistentVolume)
|
||||
pv.Spec.CSI = nil
|
||||
pv.Spec.AzureFile = azureSource
|
||||
if resourceGroup != "" {
|
||||
if pv.ObjectMeta.Annotations == nil {
|
||||
pv.ObjectMeta.Annotations = map[string]string{}
|
||||
}
|
||||
pv.ObjectMeta.Annotations[resourceGroupAnnotation] = resourceGroup
|
||||
}
|
||||
|
||||
|
7
vendor/k8s.io/kubectl/pkg/cmd/get/sorter.go
generated
vendored
7
vendor/k8s.io/kubectl/pkg/cmd/get/sorter.go
generated
vendored
@ -206,6 +206,13 @@ func isLess(i, j reflect.Value) (bool, error) {
|
||||
return true, nil
|
||||
|
||||
case reflect.Interface:
|
||||
if i.IsNil() && j.IsNil() {
|
||||
return false, nil
|
||||
} else if i.IsNil() {
|
||||
return true, nil
|
||||
} else if j.IsNil() {
|
||||
return false, nil
|
||||
}
|
||||
switch itype := i.Interface().(type) {
|
||||
case uint8:
|
||||
if jtype, ok := j.Interface().(uint8); ok {
|
||||
|
18
vendor/k8s.io/kubectl/pkg/cmd/portforward/portforward.go
generated
vendored
18
vendor/k8s.io/kubectl/pkg/cmd/portforward/portforward.go
generated
vendored
@ -260,35 +260,37 @@ func checkUDPPorts(udpOnlyPorts sets.Int, ports []string, obj metav1.Object) err
|
||||
// checkUDPPortInService returns an error if remote port in Service is a UDP port
|
||||
// TODO: remove this check after #47862 is solved
|
||||
func checkUDPPortInService(ports []string, svc *corev1.Service) error {
|
||||
udpOnlyPorts := sets.NewInt()
|
||||
udpPorts := sets.NewInt()
|
||||
tcpPorts := sets.NewInt()
|
||||
for _, port := range svc.Spec.Ports {
|
||||
portNum := int(port.Port)
|
||||
switch port.Protocol {
|
||||
case corev1.ProtocolUDP:
|
||||
udpOnlyPorts.Insert(portNum)
|
||||
udpPorts.Insert(portNum)
|
||||
case corev1.ProtocolTCP:
|
||||
udpOnlyPorts.Delete(portNum)
|
||||
tcpPorts.Insert(portNum)
|
||||
}
|
||||
}
|
||||
return checkUDPPorts(udpOnlyPorts, ports, svc)
|
||||
return checkUDPPorts(udpPorts.Difference(tcpPorts), ports, svc)
|
||||
}
|
||||
|
||||
// checkUDPPortInPod returns an error if remote port in Pod is a UDP port
|
||||
// TODO: remove this check after #47862 is solved
|
||||
func checkUDPPortInPod(ports []string, pod *corev1.Pod) error {
|
||||
udpOnlyPorts := sets.NewInt()
|
||||
udpPorts := sets.NewInt()
|
||||
tcpPorts := sets.NewInt()
|
||||
for _, ct := range pod.Spec.Containers {
|
||||
for _, ctPort := range ct.Ports {
|
||||
portNum := int(ctPort.ContainerPort)
|
||||
switch ctPort.Protocol {
|
||||
case corev1.ProtocolUDP:
|
||||
udpOnlyPorts.Insert(portNum)
|
||||
udpPorts.Insert(portNum)
|
||||
case corev1.ProtocolTCP:
|
||||
udpOnlyPorts.Delete(portNum)
|
||||
tcpPorts.Insert(portNum)
|
||||
}
|
||||
}
|
||||
}
|
||||
return checkUDPPorts(udpOnlyPorts, ports, pod)
|
||||
return checkUDPPorts(udpPorts.Difference(tcpPorts), ports, pod)
|
||||
}
|
||||
|
||||
// Complete completes all the required options for port-forward cmd.
|
||||
|
42
vendor/k8s.io/kubernetes/pkg/api/endpoints/BUILD
generated
vendored
42
vendor/k8s.io/kubernetes/pkg/api/endpoints/BUILD
generated
vendored
@ -1,42 +0,0 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["util.go"],
|
||||
importpath = "k8s.io/kubernetes/pkg/api/endpoints",
|
||||
deps = [
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//pkg/util/hash:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["util_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
8
vendor/k8s.io/kubernetes/pkg/api/endpoints/OWNERS
generated
vendored
8
vendor/k8s.io/kubernetes/pkg/api/endpoints/OWNERS
generated
vendored
@ -1,8 +0,0 @@
|
||||
# See the OWNERS docs at https://go.k8s.io/owners
|
||||
|
||||
reviewers:
|
||||
- thockin
|
||||
- smarterclayton
|
||||
- mikedanese
|
||||
- sttts
|
||||
- resouer
|
235
vendor/k8s.io/kubernetes/pkg/api/endpoints/util.go
generated
vendored
235
vendor/k8s.io/kubernetes/pkg/api/endpoints/util.go
generated
vendored
@ -1,235 +0,0 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package endpoints
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
"hash"
|
||||
"sort"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
hashutil "k8s.io/kubernetes/pkg/util/hash"
|
||||
)
|
||||
|
||||
// RepackSubsets takes a slice of EndpointSubset objects, expands it to the full
|
||||
// representation, and then repacks that into the canonical layout. This
|
||||
// ensures that code which operates on these objects can rely on the common
|
||||
// form for things like comparison. The result is a newly allocated slice.
|
||||
func RepackSubsets(subsets []api.EndpointSubset) []api.EndpointSubset {
|
||||
// First map each unique port definition to the sets of hosts that
|
||||
// offer it.
|
||||
allAddrs := map[addressKey]*api.EndpointAddress{}
|
||||
portToAddrReadyMap := map[api.EndpointPort]addressSet{}
|
||||
for i := range subsets {
|
||||
if len(subsets[i].Ports) == 0 {
|
||||
// Don't discard endpoints with no ports defined, use a sentinel.
|
||||
mapAddressesByPort(&subsets[i], api.EndpointPort{Port: -1}, allAddrs, portToAddrReadyMap)
|
||||
} else {
|
||||
for _, port := range subsets[i].Ports {
|
||||
mapAddressesByPort(&subsets[i], port, allAddrs, portToAddrReadyMap)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Next, map the sets of hosts to the sets of ports they offer.
|
||||
// Go does not allow maps or slices as keys to maps, so we have
|
||||
// to synthesize an artificial key and do a sort of 2-part
|
||||
// associative entity.
|
||||
type keyString string
|
||||
keyToAddrReadyMap := map[keyString]addressSet{}
|
||||
addrReadyMapKeyToPorts := map[keyString][]api.EndpointPort{}
|
||||
for port, addrs := range portToAddrReadyMap {
|
||||
key := keyString(hashAddresses(addrs))
|
||||
keyToAddrReadyMap[key] = addrs
|
||||
if port.Port > 0 { // avoid sentinels
|
||||
addrReadyMapKeyToPorts[key] = append(addrReadyMapKeyToPorts[key], port)
|
||||
} else {
|
||||
if _, found := addrReadyMapKeyToPorts[key]; !found {
|
||||
// Force it to be present in the map
|
||||
addrReadyMapKeyToPorts[key] = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Next, build the N-to-M association the API wants.
|
||||
final := []api.EndpointSubset{}
|
||||
for key, ports := range addrReadyMapKeyToPorts {
|
||||
var readyAddrs, notReadyAddrs []api.EndpointAddress
|
||||
for addr, ready := range keyToAddrReadyMap[key] {
|
||||
if ready {
|
||||
readyAddrs = append(readyAddrs, *addr)
|
||||
} else {
|
||||
notReadyAddrs = append(notReadyAddrs, *addr)
|
||||
}
|
||||
}
|
||||
final = append(final, api.EndpointSubset{Addresses: readyAddrs, NotReadyAddresses: notReadyAddrs, Ports: ports})
|
||||
}
|
||||
|
||||
// Finally, sort it.
|
||||
return SortSubsets(final)
|
||||
}
|
||||
|
||||
// The sets of hosts must be de-duped, using IP+UID as the key.
|
||||
type addressKey struct {
|
||||
ip string
|
||||
uid types.UID
|
||||
}
|
||||
|
||||
// mapAddressesByPort adds all ready and not-ready addresses into a map by a single port.
|
||||
func mapAddressesByPort(subset *api.EndpointSubset, port api.EndpointPort, allAddrs map[addressKey]*api.EndpointAddress, portToAddrReadyMap map[api.EndpointPort]addressSet) {
|
||||
for k := range subset.Addresses {
|
||||
mapAddressByPort(&subset.Addresses[k], port, true, allAddrs, portToAddrReadyMap)
|
||||
}
|
||||
for k := range subset.NotReadyAddresses {
|
||||
mapAddressByPort(&subset.NotReadyAddresses[k], port, false, allAddrs, portToAddrReadyMap)
|
||||
}
|
||||
}
|
||||
|
||||
// mapAddressByPort adds one address into a map by port, registering the address with a unique pointer, and preserving
|
||||
// any existing ready state.
|
||||
func mapAddressByPort(addr *api.EndpointAddress, port api.EndpointPort, ready bool, allAddrs map[addressKey]*api.EndpointAddress, portToAddrReadyMap map[api.EndpointPort]addressSet) *api.EndpointAddress {
|
||||
// use addressKey to distinguish between two endpoints that are identical addresses
|
||||
// but may have come from different hosts, for attribution.
|
||||
key := addressKey{ip: addr.IP}
|
||||
if addr.TargetRef != nil {
|
||||
key.uid = addr.TargetRef.UID
|
||||
}
|
||||
|
||||
// Accumulate the address. The full EndpointAddress structure is preserved for use when
|
||||
// we rebuild the subsets so that the final TargetRef has all of the necessary data.
|
||||
existingAddress := allAddrs[key]
|
||||
if existingAddress == nil {
|
||||
// Make a copy so we don't write to the
|
||||
// input args of this function.
|
||||
existingAddress = &api.EndpointAddress{}
|
||||
*existingAddress = *addr
|
||||
allAddrs[key] = existingAddress
|
||||
}
|
||||
|
||||
// Remember that this port maps to this address.
|
||||
if _, found := portToAddrReadyMap[port]; !found {
|
||||
portToAddrReadyMap[port] = addressSet{}
|
||||
}
|
||||
// if we have not yet recorded this port for this address, or if the previous
|
||||
// state was ready, write the current ready state. not ready always trumps
|
||||
// ready.
|
||||
if wasReady, found := portToAddrReadyMap[port][existingAddress]; !found || wasReady {
|
||||
portToAddrReadyMap[port][existingAddress] = ready
|
||||
}
|
||||
return existingAddress
|
||||
}
|
||||
|
||||
type addressSet map[*api.EndpointAddress]bool
|
||||
|
||||
type addrReady struct {
|
||||
addr *api.EndpointAddress
|
||||
ready bool
|
||||
}
|
||||
|
||||
func hashAddresses(addrs addressSet) string {
|
||||
// Flatten the list of addresses into a string so it can be used as a
|
||||
// map key. Unfortunately, DeepHashObject is implemented in terms of
|
||||
// spew, and spew does not handle non-primitive map keys well. So
|
||||
// first we collapse it into a slice, sort the slice, then hash that.
|
||||
slice := make([]addrReady, 0, len(addrs))
|
||||
for k, ready := range addrs {
|
||||
slice = append(slice, addrReady{k, ready})
|
||||
}
|
||||
sort.Sort(addrsReady(slice))
|
||||
hasher := md5.New()
|
||||
hashutil.DeepHashObject(hasher, slice)
|
||||
return hex.EncodeToString(hasher.Sum(nil)[0:])
|
||||
}
|
||||
|
||||
func lessAddrReady(a, b addrReady) bool {
|
||||
// ready is not significant to hashing since we can't have duplicate addresses
|
||||
return LessEndpointAddress(a.addr, b.addr)
|
||||
}
|
||||
|
||||
type addrsReady []addrReady
|
||||
|
||||
func (sl addrsReady) Len() int { return len(sl) }
|
||||
func (sl addrsReady) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] }
|
||||
func (sl addrsReady) Less(i, j int) bool {
|
||||
return lessAddrReady(sl[i], sl[j])
|
||||
}
|
||||
|
||||
// LessEndpointAddress compares IP addresses lexicographically and returns true if first argument is lesser than second
|
||||
func LessEndpointAddress(a, b *api.EndpointAddress) bool {
|
||||
ipComparison := bytes.Compare([]byte(a.IP), []byte(b.IP))
|
||||
if ipComparison != 0 {
|
||||
return ipComparison < 0
|
||||
}
|
||||
if b.TargetRef == nil {
|
||||
return false
|
||||
}
|
||||
if a.TargetRef == nil {
|
||||
return true
|
||||
}
|
||||
return a.TargetRef.UID < b.TargetRef.UID
|
||||
}
|
||||
|
||||
// SortSubsets sorts an array of EndpointSubset objects in place. For ease of
|
||||
// use it returns the input slice.
|
||||
func SortSubsets(subsets []api.EndpointSubset) []api.EndpointSubset {
|
||||
for i := range subsets {
|
||||
ss := &subsets[i]
|
||||
sort.Sort(addrsByIPAndUID(ss.Addresses))
|
||||
sort.Sort(addrsByIPAndUID(ss.NotReadyAddresses))
|
||||
sort.Sort(portsByHash(ss.Ports))
|
||||
}
|
||||
sort.Sort(subsetsByHash(subsets))
|
||||
return subsets
|
||||
}
|
||||
|
||||
func hashObject(hasher hash.Hash, obj interface{}) []byte {
|
||||
hashutil.DeepHashObject(hasher, obj)
|
||||
return hasher.Sum(nil)
|
||||
}
|
||||
|
||||
type subsetsByHash []api.EndpointSubset
|
||||
|
||||
func (sl subsetsByHash) Len() int { return len(sl) }
|
||||
func (sl subsetsByHash) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] }
|
||||
func (sl subsetsByHash) Less(i, j int) bool {
|
||||
hasher := md5.New()
|
||||
h1 := hashObject(hasher, sl[i])
|
||||
h2 := hashObject(hasher, sl[j])
|
||||
return bytes.Compare(h1, h2) < 0
|
||||
}
|
||||
|
||||
type addrsByIPAndUID []api.EndpointAddress
|
||||
|
||||
func (sl addrsByIPAndUID) Len() int { return len(sl) }
|
||||
func (sl addrsByIPAndUID) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] }
|
||||
func (sl addrsByIPAndUID) Less(i, j int) bool {
|
||||
return LessEndpointAddress(&sl[i], &sl[j])
|
||||
}
|
||||
|
||||
type portsByHash []api.EndpointPort
|
||||
|
||||
func (sl portsByHash) Len() int { return len(sl) }
|
||||
func (sl portsByHash) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] }
|
||||
func (sl portsByHash) Less(i, j int) bool {
|
||||
hasher := md5.New()
|
||||
h1 := hashObject(hasher, sl[i])
|
||||
h2 := hashObject(hasher, sl[j])
|
||||
return bytes.Compare(h1, h2) < 0
|
||||
}
|
11
vendor/k8s.io/kubernetes/pkg/controller/endpoint/endpoints_controller.go
generated
vendored
11
vendor/k8s.io/kubernetes/pkg/controller/endpoint/endpoints_controller.go
generated
vendored
@ -472,9 +472,18 @@ func (e *EndpointController) syncService(key string) error {
|
||||
|
||||
createEndpoints := len(currentEndpoints.ResourceVersion) == 0
|
||||
|
||||
// Compare the sorted subsets and labels
|
||||
// Remove the HeadlessService label from the endpoints if it exists,
|
||||
// as this won't be set on the service itself
|
||||
// and will cause a false negative in this diff check.
|
||||
// But first check if it has that label to avoid expensive copies.
|
||||
compareLabels := currentEndpoints.Labels
|
||||
if _, ok := currentEndpoints.Labels[v1.IsHeadlessService]; ok {
|
||||
compareLabels = utillabels.CloneAndRemoveLabel(currentEndpoints.Labels, v1.IsHeadlessService)
|
||||
}
|
||||
if !createEndpoints &&
|
||||
apiequality.Semantic.DeepEqual(currentEndpoints.Subsets, subsets) &&
|
||||
apiequality.Semantic.DeepEqual(currentEndpoints.Labels, service.Labels) {
|
||||
apiequality.Semantic.DeepEqual(compareLabels, service.Labels) {
|
||||
klog.V(5).Infof("endpoints are equal for %s/%s, skipping update", service.Namespace, service.Name)
|
||||
return nil
|
||||
}
|
||||
|
14
vendor/k8s.io/kubernetes/pkg/controller/endpointslice/reconciler.go
generated
vendored
14
vendor/k8s.io/kubernetes/pkg/controller/endpointslice/reconciler.go
generated
vendored
@ -70,7 +70,7 @@ func (r *reconciler) reconcile(service *corev1.Service, pods []*corev1.Pod, exis
|
||||
existingSlicesByPortMap := map[endpointutil.PortMapKey][]*discovery.EndpointSlice{}
|
||||
numExistingEndpoints := 0
|
||||
for _, existingSlice := range existingSlices {
|
||||
if existingSlice.AddressType == addressType {
|
||||
if existingSlice.AddressType == addressType && ownedBy(existingSlice, service) {
|
||||
epHash := endpointutil.NewPortMapKey(existingSlice.Ports)
|
||||
existingSlicesByPortMap[epHash] = append(existingSlicesByPortMap[epHash], existingSlice)
|
||||
numExistingEndpoints += len(existingSlice.Endpoints)
|
||||
@ -187,13 +187,15 @@ func (r *reconciler) finalize(
|
||||
}
|
||||
sliceToDelete := slicesToDelete[i]
|
||||
slice := slicesToCreate[len(slicesToCreate)-1]
|
||||
// Only update EndpointSlices that have the same AddressType as this
|
||||
// field is considered immutable. Since Services also consider IPFamily
|
||||
// immutable, the only case where this should matter will be the
|
||||
// migration from IP to IPv4 and IPv6 AddressTypes, where there's a
|
||||
// Only update EndpointSlices that are owned by this Service and have
|
||||
// the same AddressType. We need to avoid updating EndpointSlices that
|
||||
// are being garbage collected for an old Service with the same name.
|
||||
// The AddressType field is immutable. Since Services also consider
|
||||
// IPFamily immutable, the only case where this should matter will be
|
||||
// the migration from IP to IPv4 and IPv6 AddressTypes, where there's a
|
||||
// chance EndpointSlices with an IP AddressType would otherwise be
|
||||
// updated to IPv4 or IPv6 without this check.
|
||||
if sliceToDelete.AddressType == slice.AddressType {
|
||||
if sliceToDelete.AddressType == slice.AddressType && ownedBy(sliceToDelete, service) {
|
||||
slice.Name = sliceToDelete.Name
|
||||
slicesToCreate = slicesToCreate[:len(slicesToCreate)-1]
|
||||
slicesToUpdate = append(slicesToUpdate, slice)
|
||||
|
11
vendor/k8s.io/kubernetes/pkg/controller/endpointslice/utils.go
generated
vendored
11
vendor/k8s.io/kubernetes/pkg/controller/endpointslice/utils.go
generated
vendored
@ -201,6 +201,17 @@ func objectRefPtrChanged(ref1, ref2 *corev1.ObjectReference) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// ownedBy returns true if the provided EndpointSlice is owned by the provided
|
||||
// Service.
|
||||
func ownedBy(endpointSlice *discovery.EndpointSlice, svc *corev1.Service) bool {
|
||||
for _, o := range endpointSlice.OwnerReferences {
|
||||
if o.UID == svc.UID && o.Kind == "Service" && o.APIVersion == "v1" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// getSliceToFill will return the EndpointSlice that will be closest to full
|
||||
// when numEndpoints are added. If no EndpointSlice can be found, a nil pointer
|
||||
// will be returned.
|
||||
|
16
vendor/k8s.io/kubernetes/pkg/credentialprovider/config.go
generated
vendored
16
vendor/k8s.io/kubernetes/pkg/credentialprovider/config.go
generated
vendored
@ -117,10 +117,14 @@ func ReadDockercfgFile(searchPaths []string) (cfg DockerConfig, err error) {
|
||||
continue
|
||||
}
|
||||
cfg, err := readDockerConfigFileFromBytes(contents)
|
||||
if err == nil {
|
||||
klog.V(4).Infof("found .dockercfg at %s", absDockerConfigFileLocation)
|
||||
return cfg, nil
|
||||
if err != nil {
|
||||
klog.V(4).Infof("couldn't get the config from %q contents: %v", absDockerConfigFileLocation, err)
|
||||
continue
|
||||
}
|
||||
|
||||
klog.V(4).Infof("found .dockercfg at %s", absDockerConfigFileLocation)
|
||||
return cfg, nil
|
||||
|
||||
}
|
||||
return nil, fmt.Errorf("couldn't find valid .dockercfg after checking in %v", searchPaths)
|
||||
}
|
||||
@ -230,8 +234,7 @@ func ReadDockerConfigFileFromURL(url string, client *http.Client, header *http.H
|
||||
|
||||
func readDockerConfigFileFromBytes(contents []byte) (cfg DockerConfig, err error) {
|
||||
if err = json.Unmarshal(contents, &cfg); err != nil {
|
||||
klog.Errorf("while trying to parse blob %q: %v", contents, err)
|
||||
return nil, err
|
||||
return nil, errors.New("error occurred while trying to unmarshal json")
|
||||
}
|
||||
return
|
||||
}
|
||||
@ -239,8 +242,7 @@ func readDockerConfigFileFromBytes(contents []byte) (cfg DockerConfig, err error
|
||||
func readDockerConfigJSONFileFromBytes(contents []byte) (cfg DockerConfig, err error) {
|
||||
var cfgJSON DockerConfigJSON
|
||||
if err = json.Unmarshal(contents, &cfgJSON); err != nil {
|
||||
klog.Errorf("while trying to parse blob %q: %v", contents, err)
|
||||
return nil, err
|
||||
return nil, errors.New("error occurred while trying to unmarshal json")
|
||||
}
|
||||
cfg = cfgJSON.Auths
|
||||
return
|
||||
|
6
vendor/k8s.io/kubernetes/pkg/kubelet/time_cache.go
generated
vendored
6
vendor/k8s.io/kubernetes/pkg/kubelet/time_cache.go
generated
vendored
@ -27,7 +27,7 @@ import (
|
||||
|
||||
// timeCache stores a time keyed by uid
|
||||
type timeCache struct {
|
||||
lock sync.RWMutex
|
||||
lock sync.Mutex
|
||||
cache *lru.Cache
|
||||
}
|
||||
|
||||
@ -53,8 +53,8 @@ func (c *timeCache) Remove(uid types.UID) {
|
||||
}
|
||||
|
||||
func (c *timeCache) Get(uid types.UID) (time.Time, bool) {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
value, ok := c.cache.Get(uid)
|
||||
if !ok {
|
||||
return time.Time{}, false
|
||||
|
12
vendor/k8s.io/kubernetes/pkg/proxy/endpoints.go
generated
vendored
12
vendor/k8s.io/kubernetes/pkg/proxy/endpoints.go
generated
vendored
@ -167,7 +167,10 @@ func (ect *EndpointChangeTracker) Update(previous, current *v1.Endpoints) bool {
|
||||
ect.items[namespacedName] = change
|
||||
}
|
||||
|
||||
if t := getLastChangeTriggerTime(endpoints.Annotations); !t.IsZero() {
|
||||
// In case of Endpoints deletion, the LastChangeTriggerTime annotation is
|
||||
// by-definition coming from the time of last update, which is not what
|
||||
// we want to measure. So we simply ignore it in this cases.
|
||||
if t := getLastChangeTriggerTime(endpoints.Annotations); !t.IsZero() && current != nil {
|
||||
ect.lastChangeTriggerTimes[namespacedName] = append(ect.lastChangeTriggerTimes[namespacedName], t)
|
||||
}
|
||||
|
||||
@ -222,7 +225,12 @@ func (ect *EndpointChangeTracker) EndpointSliceUpdate(endpointSlice *discovery.E
|
||||
|
||||
if changeNeeded {
|
||||
metrics.EndpointChangesPending.Inc()
|
||||
if t := getLastChangeTriggerTime(endpointSlice.Annotations); !t.IsZero() {
|
||||
// In case of Endpoints deletion, the LastChangeTriggerTime annotation is
|
||||
// by-definition coming from the time of last update, which is not what
|
||||
// we want to measure. So we simply ignore it in this cases.
|
||||
// TODO(wojtek-t, robscott): Address the problem for EndpointSlice deletion
|
||||
// when other EndpointSlice for that service still exist.
|
||||
if t := getLastChangeTriggerTime(endpointSlice.Annotations); !t.IsZero() && !removeSlice {
|
||||
ect.lastChangeTriggerTimes[namespacedName] =
|
||||
append(ect.lastChangeTriggerTimes[namespacedName], t)
|
||||
}
|
||||
|
1
vendor/k8s.io/kubernetes/pkg/registry/core/endpoint/BUILD
generated
vendored
1
vendor/k8s.io/kubernetes/pkg/registry/core/endpoint/BUILD
generated
vendored
@ -13,7 +13,6 @@ go_library(
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/registry/core/endpoint",
|
||||
deps = [
|
||||
"//pkg/api/endpoints:go_default_library",
|
||||
"//pkg/api/legacyscheme:go_default_library",
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//pkg/apis/core/validation:go_default_library",
|
||||
|
3
vendor/k8s.io/kubernetes/pkg/registry/core/endpoint/strategy.go
generated
vendored
3
vendor/k8s.io/kubernetes/pkg/registry/core/endpoint/strategy.go
generated
vendored
@ -22,7 +22,6 @@ import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||
"k8s.io/apiserver/pkg/storage/names"
|
||||
endptspkg "k8s.io/kubernetes/pkg/api/endpoints"
|
||||
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/pkg/apis/core/validation"
|
||||
@ -60,8 +59,6 @@ func (endpointsStrategy) Validate(ctx context.Context, obj runtime.Object) field
|
||||
|
||||
// Canonicalize normalizes the object after validation.
|
||||
func (endpointsStrategy) Canonicalize(obj runtime.Object) {
|
||||
endpoints := obj.(*api.Endpoints)
|
||||
endpoints.Subsets = endptspkg.RepackSubsets(endpoints.Subsets)
|
||||
}
|
||||
|
||||
// AllowCreateOnUpdate is true for endpoints.
|
||||
|
16
vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/filtering.go
generated
vendored
16
vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity/filtering.go
generated
vendored
@ -163,25 +163,25 @@ func podMatchesAllAffinityTerms(pod *v1.Pod, terms []framework.AffinityTerm) boo
|
||||
// getTPMapMatchingExistingAntiAffinity calculates the following for each existing pod on each node:
|
||||
// (1) Whether it has PodAntiAffinity
|
||||
// (2) Whether any AffinityTerm matches the incoming pod
|
||||
func getTPMapMatchingExistingAntiAffinity(pod *v1.Pod, allNodes []*framework.NodeInfo) topologyToMatchedTermCount {
|
||||
topoMaps := make([]topologyToMatchedTermCount, len(allNodes))
|
||||
func getTPMapMatchingExistingAntiAffinity(pod *v1.Pod, nodes []*framework.NodeInfo) topologyToMatchedTermCount {
|
||||
topoMaps := make([]topologyToMatchedTermCount, len(nodes))
|
||||
index := int32(-1)
|
||||
processNode := func(i int) {
|
||||
nodeInfo := allNodes[i]
|
||||
nodeInfo := nodes[i]
|
||||
node := nodeInfo.Node()
|
||||
if node == nil {
|
||||
klog.Error("node not found")
|
||||
return
|
||||
}
|
||||
topoMap := make(topologyToMatchedTermCount)
|
||||
for _, existingPod := range nodeInfo.PodsWithAffinity {
|
||||
for _, existingPod := range nodeInfo.PodsWithRequiredAntiAffinity {
|
||||
topoMap.updateWithAntiAffinityTerms(pod, node, existingPod.RequiredAntiAffinityTerms, 1)
|
||||
}
|
||||
if len(topoMap) != 0 {
|
||||
topoMaps[atomic.AddInt32(&index, 1)] = topoMap
|
||||
}
|
||||
}
|
||||
parallelize.Until(context.Background(), len(allNodes), processNode)
|
||||
parallelize.Until(context.Background(), len(nodes), processNode)
|
||||
|
||||
result := make(topologyToMatchedTermCount)
|
||||
for i := 0; i <= int(index); i++ {
|
||||
@ -241,12 +241,12 @@ func getTPMapMatchingIncomingAffinityAntiAffinity(podInfo *framework.PodInfo, al
|
||||
// PreFilter invoked at the prefilter extension point.
|
||||
func (pl *InterPodAffinity) PreFilter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod) *framework.Status {
|
||||
var allNodes []*framework.NodeInfo
|
||||
var havePodsWithAffinityNodes []*framework.NodeInfo
|
||||
var nodesWithRequiredAntiAffinityPods []*framework.NodeInfo
|
||||
var err error
|
||||
if allNodes, err = pl.sharedLister.NodeInfos().List(); err != nil {
|
||||
return framework.NewStatus(framework.Error, fmt.Sprintf("failed to list NodeInfos: %v", err))
|
||||
}
|
||||
if havePodsWithAffinityNodes, err = pl.sharedLister.NodeInfos().HavePodsWithAffinityList(); err != nil {
|
||||
if nodesWithRequiredAntiAffinityPods, err = pl.sharedLister.NodeInfos().HavePodsWithRequiredAntiAffinityList(); err != nil {
|
||||
return framework.NewStatus(framework.Error, fmt.Sprintf("failed to list NodeInfos with pods with affinity: %v", err))
|
||||
}
|
||||
|
||||
@ -256,7 +256,7 @@ func (pl *InterPodAffinity) PreFilter(ctx context.Context, cycleState *framework
|
||||
}
|
||||
|
||||
// existingPodAntiAffinityMap will be used later for efficient check on existing pods' anti-affinity
|
||||
existingPodAntiAffinityMap := getTPMapMatchingExistingAntiAffinity(pod, havePodsWithAffinityNodes)
|
||||
existingPodAntiAffinityMap := getTPMapMatchingExistingAntiAffinity(pod, nodesWithRequiredAntiAffinityPods)
|
||||
|
||||
// incomingPodAffinityMap will be used later for efficient check on incoming pod's affinity
|
||||
// incomingPodAntiAffinityMap will be used later for efficient check on incoming pod's anti-affinity
|
||||
|
2
vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/listers.go
generated
vendored
2
vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/listers.go
generated
vendored
@ -22,6 +22,8 @@ type NodeInfoLister interface {
|
||||
List() ([]*NodeInfo, error)
|
||||
// Returns the list of NodeInfos of nodes with pods with affinity terms.
|
||||
HavePodsWithAffinityList() ([]*NodeInfo, error)
|
||||
// Returns the list of NodeInfos of nodes with pods with required anti-affinity terms.
|
||||
HavePodsWithRequiredAntiAffinityList() ([]*NodeInfo, error)
|
||||
// Returns the NodeInfo of the given node name.
|
||||
Get(nodeName string) (*NodeInfo, error)
|
||||
}
|
||||
|
60
vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/types.go
generated
vendored
60
vendor/k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/types.go
generated
vendored
@ -196,6 +196,9 @@ type NodeInfo struct {
|
||||
// The subset of pods with affinity.
|
||||
PodsWithAffinity []*PodInfo
|
||||
|
||||
// The subset of pods with required anti-affinity.
|
||||
PodsWithRequiredAntiAffinity []*PodInfo
|
||||
|
||||
// Ports allocated on the node.
|
||||
UsedPorts HostPortInfo
|
||||
|
||||
@ -457,6 +460,9 @@ func (n *NodeInfo) Clone() *NodeInfo {
|
||||
if len(n.PodsWithAffinity) > 0 {
|
||||
clone.PodsWithAffinity = append([]*PodInfo(nil), n.PodsWithAffinity...)
|
||||
}
|
||||
if len(n.PodsWithRequiredAntiAffinity) > 0 {
|
||||
clone.PodsWithRequiredAntiAffinity = append([]*PodInfo(nil), n.PodsWithRequiredAntiAffinity...)
|
||||
}
|
||||
return clone
|
||||
}
|
||||
|
||||
@ -486,10 +492,12 @@ func (n *NodeInfo) AddPod(pod *v1.Pod) {
|
||||
n.NonZeroRequested.MilliCPU += non0CPU
|
||||
n.NonZeroRequested.Memory += non0Mem
|
||||
n.Pods = append(n.Pods, podInfo)
|
||||
affinity := pod.Spec.Affinity
|
||||
if affinity != nil && (affinity.PodAffinity != nil || affinity.PodAntiAffinity != nil) {
|
||||
if podWithAffinity(pod) {
|
||||
n.PodsWithAffinity = append(n.PodsWithAffinity, podInfo)
|
||||
}
|
||||
if podWithRequiredAntiAffinity(pod) {
|
||||
n.PodsWithRequiredAntiAffinity = append(n.PodsWithRequiredAntiAffinity, podInfo)
|
||||
}
|
||||
|
||||
// Consume ports when pods added.
|
||||
n.updateUsedPorts(podInfo.Pod, true)
|
||||
@ -497,33 +505,54 @@ func (n *NodeInfo) AddPod(pod *v1.Pod) {
|
||||
n.Generation = nextGeneration()
|
||||
}
|
||||
|
||||
// RemovePod subtracts pod information from this NodeInfo.
|
||||
func (n *NodeInfo) RemovePod(pod *v1.Pod) error {
|
||||
k1, err := GetPodKey(pod)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
func podWithAffinity(p *v1.Pod) bool {
|
||||
affinity := p.Spec.Affinity
|
||||
return affinity != nil && (affinity.PodAffinity != nil || affinity.PodAntiAffinity != nil)
|
||||
}
|
||||
|
||||
for i := range n.PodsWithAffinity {
|
||||
k2, err := GetPodKey(n.PodsWithAffinity[i].Pod)
|
||||
func podWithRequiredAntiAffinity(p *v1.Pod) bool {
|
||||
affinity := p.Spec.Affinity
|
||||
return affinity != nil && affinity.PodAntiAffinity != nil &&
|
||||
len(affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0
|
||||
}
|
||||
|
||||
func removeFromSlice(s []*PodInfo, k string) []*PodInfo {
|
||||
for i := range s {
|
||||
k2, err := GetPodKey(s[i].Pod)
|
||||
if err != nil {
|
||||
klog.Errorf("Cannot get pod key, err: %v", err)
|
||||
continue
|
||||
}
|
||||
if k1 == k2 {
|
||||
if k == k2 {
|
||||
// delete the element
|
||||
n.PodsWithAffinity[i] = n.PodsWithAffinity[len(n.PodsWithAffinity)-1]
|
||||
n.PodsWithAffinity = n.PodsWithAffinity[:len(n.PodsWithAffinity)-1]
|
||||
s[i] = s[len(s)-1]
|
||||
s = s[:len(s)-1]
|
||||
break
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// RemovePod subtracts pod information from this NodeInfo.
|
||||
func (n *NodeInfo) RemovePod(pod *v1.Pod) error {
|
||||
k, err := GetPodKey(pod)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if podWithAffinity(pod) {
|
||||
n.PodsWithAffinity = removeFromSlice(n.PodsWithAffinity, k)
|
||||
}
|
||||
if podWithRequiredAntiAffinity(pod) {
|
||||
n.PodsWithRequiredAntiAffinity = removeFromSlice(n.PodsWithRequiredAntiAffinity, k)
|
||||
}
|
||||
|
||||
for i := range n.Pods {
|
||||
k2, err := GetPodKey(n.Pods[i].Pod)
|
||||
if err != nil {
|
||||
klog.Errorf("Cannot get pod key, err: %v", err)
|
||||
continue
|
||||
}
|
||||
if k1 == k2 {
|
||||
if k == k2 {
|
||||
// delete the element
|
||||
n.Pods[i] = n.Pods[len(n.Pods)-1]
|
||||
n.Pods = n.Pods[:len(n.Pods)-1]
|
||||
@ -558,6 +587,9 @@ func (n *NodeInfo) resetSlicesIfEmpty() {
|
||||
if len(n.PodsWithAffinity) == 0 {
|
||||
n.PodsWithAffinity = nil
|
||||
}
|
||||
if len(n.PodsWithRequiredAntiAffinity) == 0 {
|
||||
n.PodsWithRequiredAntiAffinity = nil
|
||||
}
|
||||
if len(n.Pods) == 0 {
|
||||
n.Pods = nil
|
||||
}
|
||||
|
27
vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/cache.go
generated
vendored
27
vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/cache.go
generated
vendored
@ -196,6 +196,8 @@ func (cache *schedulerCache) Dump() *Dump {
|
||||
|
||||
// UpdateSnapshot takes a snapshot of cached NodeInfo map. This is called at
|
||||
// beginning of every scheduling cycle.
|
||||
// The snapshot only includes Nodes that are not deleted at the time this function is called.
|
||||
// nodeinfo.Node() is guaranteed to be not nil for all the nodes in the snapshot.
|
||||
// This function tracks generation number of NodeInfo and updates only the
|
||||
// entries of an existing snapshot that have changed after the snapshot was taken.
|
||||
func (cache *schedulerCache) UpdateSnapshot(nodeSnapshot *Snapshot) error {
|
||||
@ -213,6 +215,10 @@ func (cache *schedulerCache) UpdateSnapshot(nodeSnapshot *Snapshot) error {
|
||||
// status from having pods with affinity to NOT having pods with affinity or the other
|
||||
// way around.
|
||||
updateNodesHavePodsWithAffinity := false
|
||||
// HavePodsWithRequiredAntiAffinityNodeInfoList must be re-created if a node changed its
|
||||
// status from having pods with required anti-affinity to NOT having pods with required
|
||||
// anti-affinity or the other way around.
|
||||
updateNodesHavePodsWithRequiredAntiAffinity := false
|
||||
|
||||
// Start from the head of the NodeInfo doubly linked list and update snapshot
|
||||
// of NodeInfos updated after the last snapshot.
|
||||
@ -239,6 +245,9 @@ func (cache *schedulerCache) UpdateSnapshot(nodeSnapshot *Snapshot) error {
|
||||
if (len(existing.PodsWithAffinity) > 0) != (len(clone.PodsWithAffinity) > 0) {
|
||||
updateNodesHavePodsWithAffinity = true
|
||||
}
|
||||
if (len(existing.PodsWithRequiredAntiAffinity) > 0) != (len(clone.PodsWithRequiredAntiAffinity) > 0) {
|
||||
updateNodesHavePodsWithRequiredAntiAffinity = true
|
||||
}
|
||||
// We need to preserve the original pointer of the NodeInfo struct since it
|
||||
// is used in the NodeInfoList, which we may not update.
|
||||
*existing = *clone
|
||||
@ -249,12 +258,15 @@ func (cache *schedulerCache) UpdateSnapshot(nodeSnapshot *Snapshot) error {
|
||||
nodeSnapshot.generation = cache.headNode.info.Generation
|
||||
}
|
||||
|
||||
if len(nodeSnapshot.nodeInfoMap) > len(cache.nodes) {
|
||||
// Comparing to pods in nodeTree.
|
||||
// Deleted nodes get removed from the tree, but they might remain in the nodes map
|
||||
// if they still have non-deleted Pods.
|
||||
if len(nodeSnapshot.nodeInfoMap) > cache.nodeTree.numNodes {
|
||||
cache.removeDeletedNodesFromSnapshot(nodeSnapshot)
|
||||
updateAllLists = true
|
||||
}
|
||||
|
||||
if updateAllLists || updateNodesHavePodsWithAffinity {
|
||||
if updateAllLists || updateNodesHavePodsWithAffinity || updateNodesHavePodsWithRequiredAntiAffinity {
|
||||
cache.updateNodeInfoSnapshotList(nodeSnapshot, updateAllLists)
|
||||
}
|
||||
|
||||
@ -276,6 +288,7 @@ func (cache *schedulerCache) UpdateSnapshot(nodeSnapshot *Snapshot) error {
|
||||
|
||||
func (cache *schedulerCache) updateNodeInfoSnapshotList(snapshot *Snapshot, updateAll bool) {
|
||||
snapshot.havePodsWithAffinityNodeInfoList = make([]*framework.NodeInfo, 0, cache.nodeTree.numNodes)
|
||||
snapshot.havePodsWithRequiredAntiAffinityNodeInfoList = make([]*framework.NodeInfo, 0, cache.nodeTree.numNodes)
|
||||
if updateAll {
|
||||
// Take a snapshot of the nodes order in the tree
|
||||
snapshot.nodeInfoList = make([]*framework.NodeInfo, 0, cache.nodeTree.numNodes)
|
||||
@ -287,6 +300,9 @@ func (cache *schedulerCache) updateNodeInfoSnapshotList(snapshot *Snapshot, upda
|
||||
if len(n.PodsWithAffinity) > 0 {
|
||||
snapshot.havePodsWithAffinityNodeInfoList = append(snapshot.havePodsWithAffinityNodeInfoList, n)
|
||||
}
|
||||
if len(n.PodsWithRequiredAntiAffinity) > 0 {
|
||||
snapshot.havePodsWithRequiredAntiAffinityNodeInfoList = append(snapshot.havePodsWithRequiredAntiAffinityNodeInfoList, n)
|
||||
}
|
||||
} else {
|
||||
klog.Errorf("node %q exist in nodeTree but not in NodeInfoMap, this should not happen.", nodeName)
|
||||
}
|
||||
@ -296,18 +312,21 @@ func (cache *schedulerCache) updateNodeInfoSnapshotList(snapshot *Snapshot, upda
|
||||
if len(n.PodsWithAffinity) > 0 {
|
||||
snapshot.havePodsWithAffinityNodeInfoList = append(snapshot.havePodsWithAffinityNodeInfoList, n)
|
||||
}
|
||||
if len(n.PodsWithRequiredAntiAffinity) > 0 {
|
||||
snapshot.havePodsWithRequiredAntiAffinityNodeInfoList = append(snapshot.havePodsWithRequiredAntiAffinityNodeInfoList, n)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If certain nodes were deleted after the last snapshot was taken, we should remove them from the snapshot.
|
||||
func (cache *schedulerCache) removeDeletedNodesFromSnapshot(snapshot *Snapshot) {
|
||||
toDelete := len(snapshot.nodeInfoMap) - len(cache.nodes)
|
||||
toDelete := len(snapshot.nodeInfoMap) - cache.nodeTree.numNodes
|
||||
for name := range snapshot.nodeInfoMap {
|
||||
if toDelete <= 0 {
|
||||
break
|
||||
}
|
||||
if _, ok := cache.nodes[name]; !ok {
|
||||
if n, ok := cache.nodes[name]; !ok || n.info.Node() == nil {
|
||||
delete(snapshot.nodeInfoMap, name)
|
||||
toDelete--
|
||||
}
|
||||
|
4
vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/interface.go
generated
vendored
4
vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/interface.go
generated
vendored
@ -17,7 +17,7 @@ limitations under the License.
|
||||
package cache
|
||||
|
||||
import (
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
|
||||
)
|
||||
|
||||
@ -99,6 +99,8 @@ type Cache interface {
|
||||
// UpdateSnapshot updates the passed infoSnapshot to the current contents of Cache.
|
||||
// The node info contains aggregated information of pods scheduled (including assumed to be)
|
||||
// on this node.
|
||||
// The snapshot only includes Nodes that are not deleted at the time this function is called.
|
||||
// nodeinfo.Node() is guaranteed to be not nil for all the nodes in the snapshot.
|
||||
UpdateSnapshot(nodeSnapshot *Snapshot) error
|
||||
|
||||
// Dump produces a dump of the current cache.
|
||||
|
18
vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/snapshot.go
generated
vendored
18
vendor/k8s.io/kubernetes/pkg/scheduler/internal/cache/snapshot.go
generated
vendored
@ -33,7 +33,10 @@ type Snapshot struct {
|
||||
nodeInfoList []*framework.NodeInfo
|
||||
// havePodsWithAffinityNodeInfoList is the list of nodes with at least one pod declaring affinity terms.
|
||||
havePodsWithAffinityNodeInfoList []*framework.NodeInfo
|
||||
generation int64
|
||||
// havePodsWithRequiredAntiAffinityNodeInfoList is the list of nodes with at least one pod declaring
|
||||
// required anti-affinity terms.
|
||||
havePodsWithRequiredAntiAffinityNodeInfoList []*framework.NodeInfo
|
||||
generation int64
|
||||
}
|
||||
|
||||
var _ framework.SharedLister = &Snapshot{}
|
||||
@ -50,17 +53,22 @@ func NewSnapshot(pods []*v1.Pod, nodes []*v1.Node) *Snapshot {
|
||||
nodeInfoMap := createNodeInfoMap(pods, nodes)
|
||||
nodeInfoList := make([]*framework.NodeInfo, 0, len(nodeInfoMap))
|
||||
havePodsWithAffinityNodeInfoList := make([]*framework.NodeInfo, 0, len(nodeInfoMap))
|
||||
havePodsWithRequiredAntiAffinityNodeInfoList := make([]*framework.NodeInfo, 0, len(nodeInfoMap))
|
||||
for _, v := range nodeInfoMap {
|
||||
nodeInfoList = append(nodeInfoList, v)
|
||||
if len(v.PodsWithAffinity) > 0 {
|
||||
havePodsWithAffinityNodeInfoList = append(havePodsWithAffinityNodeInfoList, v)
|
||||
}
|
||||
if len(v.PodsWithRequiredAntiAffinity) > 0 {
|
||||
havePodsWithRequiredAntiAffinityNodeInfoList = append(havePodsWithRequiredAntiAffinityNodeInfoList, v)
|
||||
}
|
||||
}
|
||||
|
||||
s := NewEmptySnapshot()
|
||||
s.nodeInfoMap = nodeInfoMap
|
||||
s.nodeInfoList = nodeInfoList
|
||||
s.havePodsWithAffinityNodeInfoList = havePodsWithAffinityNodeInfoList
|
||||
s.havePodsWithRequiredAntiAffinityNodeInfoList = havePodsWithRequiredAntiAffinityNodeInfoList
|
||||
|
||||
return s
|
||||
}
|
||||
@ -137,11 +145,17 @@ func (s *Snapshot) List() ([]*framework.NodeInfo, error) {
|
||||
return s.nodeInfoList, nil
|
||||
}
|
||||
|
||||
// HavePodsWithAffinityList returns the list of nodes with at least one pods with inter-pod affinity
|
||||
// HavePodsWithAffinityList returns the list of nodes with at least one pod with inter-pod affinity
|
||||
func (s *Snapshot) HavePodsWithAffinityList() ([]*framework.NodeInfo, error) {
|
||||
return s.havePodsWithAffinityNodeInfoList, nil
|
||||
}
|
||||
|
||||
// HavePodsWithRequiredAntiAffinityList returns the list of nodes with at least one pod with
|
||||
// required inter-pod anti-affinity
|
||||
func (s *Snapshot) HavePodsWithRequiredAntiAffinityList() ([]*framework.NodeInfo, error) {
|
||||
return s.havePodsWithRequiredAntiAffinityNodeInfoList, nil
|
||||
}
|
||||
|
||||
// Get returns the NodeInfo of the given node name.
|
||||
func (s *Snapshot) Get(nodeName string) (*framework.NodeInfo, error) {
|
||||
if v, ok := s.nodeInfoMap[nodeName]; ok && v.Node() != nil {
|
||||
|
6
vendor/k8s.io/legacy-cloud-providers/azure/azure.go
generated
vendored
6
vendor/k8s.io/legacy-cloud-providers/azure/azure.go
generated
vendored
@ -248,7 +248,7 @@ type Cloud struct {
|
||||
|
||||
ResourceRequestBackoff wait.Backoff
|
||||
metadata *InstanceMetadataService
|
||||
vmSet VMSet
|
||||
VMSet VMSet
|
||||
|
||||
// ipv6DualStack allows overriding for unit testing. It's normally initialized from featuregates
|
||||
ipv6DualStackEnabled bool
|
||||
@ -491,12 +491,12 @@ func (az *Cloud) InitializeCloudFromConfig(config *Config, fromSecret bool) erro
|
||||
}
|
||||
|
||||
if strings.EqualFold(vmTypeVMSS, az.Config.VMType) {
|
||||
az.vmSet, err = newScaleSet(az)
|
||||
az.VMSet, err = newScaleSet(az)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
az.vmSet = newAvailabilitySet(az)
|
||||
az.VMSet = newAvailabilitySet(az)
|
||||
}
|
||||
|
||||
az.vmCache, err = az.newVMCache()
|
||||
|
4
vendor/k8s.io/legacy-cloud-providers/azure/azure_backoff.go
generated
vendored
4
vendor/k8s.io/legacy-cloud-providers/azure/azure_backoff.go
generated
vendored
@ -111,7 +111,7 @@ func (az *Cloud) getPrivateIPsForMachineWithRetry(nodeName types.NodeName) ([]st
|
||||
var privateIPs []string
|
||||
err := wait.ExponentialBackoff(az.RequestBackoff(), func() (bool, error) {
|
||||
var retryErr error
|
||||
privateIPs, retryErr = az.vmSet.GetPrivateIPsByNodeName(string(nodeName))
|
||||
privateIPs, retryErr = az.VMSet.GetPrivateIPsByNodeName(string(nodeName))
|
||||
if retryErr != nil {
|
||||
// won't retry since the instance doesn't exist on Azure.
|
||||
if retryErr == cloudprovider.InstanceNotFound {
|
||||
@ -135,7 +135,7 @@ func (az *Cloud) GetIPForMachineWithRetry(name types.NodeName) (string, string,
|
||||
var ip, publicIP string
|
||||
err := wait.ExponentialBackoff(az.RequestBackoff(), func() (bool, error) {
|
||||
var retryErr error
|
||||
ip, publicIP, retryErr = az.vmSet.GetIPByNodeName(string(name))
|
||||
ip, publicIP, retryErr = az.VMSet.GetIPByNodeName(string(name))
|
||||
if retryErr != nil {
|
||||
klog.Errorf("GetIPForMachineWithRetry(%s): backoff failure, will retry,err=%v", name, retryErr)
|
||||
return false, nil
|
||||
|
57
vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go
generated
vendored
57
vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go
generated
vendored
@ -49,7 +49,9 @@ const (
|
||||
errLeaseFailed = "AcquireDiskLeaseFailed"
|
||||
errLeaseIDMissing = "LeaseIdMissing"
|
||||
errContainerNotFound = "ContainerNotFound"
|
||||
errDiskBlobNotFound = "DiskBlobNotFound"
|
||||
errStatusCode400 = "statuscode=400"
|
||||
errInvalidParameter = `code="invalidparameter"`
|
||||
errTargetInstanceIds = `target="instanceids"`
|
||||
sourceSnapshot = "snapshot"
|
||||
sourceVolume = "volume"
|
||||
|
||||
@ -90,15 +92,15 @@ type controllerCommon struct {
|
||||
|
||||
// getNodeVMSet gets the VMSet interface based on config.VMType and the real virtual machine type.
|
||||
func (c *controllerCommon) getNodeVMSet(nodeName types.NodeName, crt azcache.AzureCacheReadType) (VMSet, error) {
|
||||
// 1. vmType is standard, return cloud.vmSet directly.
|
||||
// 1. vmType is standard, return cloud.VMSet directly.
|
||||
if c.cloud.VMType == vmTypeStandard {
|
||||
return c.cloud.vmSet, nil
|
||||
return c.cloud.VMSet, nil
|
||||
}
|
||||
|
||||
// 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to scaleSet.
|
||||
ss, ok := c.cloud.vmSet.(*scaleSet)
|
||||
ss, ok := c.cloud.VMSet.(*scaleSet)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType)
|
||||
return nil, fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.VMSet, c.cloud.VMType)
|
||||
}
|
||||
|
||||
// 3. If the node is managed by availability set, then return ss.availabilitySet.
|
||||
@ -214,24 +216,32 @@ func (c *controllerCommon) DetachDisk(diskName, diskURI string, nodeName types.N
|
||||
c.diskAttachDetachMap.Delete(strings.ToLower(diskURI))
|
||||
c.vmLockMap.UnlockEntry(strings.ToLower(string(nodeName)))
|
||||
|
||||
if err != nil && retry.IsErrorRetriable(err) && c.cloud.CloudProviderBackoff {
|
||||
klog.V(2).Infof("azureDisk - update backing off: detach disk(%s, %s), err: %v", diskName, diskURI, err)
|
||||
retryErr := kwait.ExponentialBackoff(c.cloud.RequestBackoff(), func() (bool, error) {
|
||||
c.vmLockMap.LockEntry(strings.ToLower(string(nodeName)))
|
||||
c.diskAttachDetachMap.Store(strings.ToLower(diskURI), "detaching")
|
||||
err := vmset.DetachDisk(diskName, diskURI, nodeName)
|
||||
c.diskAttachDetachMap.Delete(strings.ToLower(diskURI))
|
||||
c.vmLockMap.UnlockEntry(strings.ToLower(string(nodeName)))
|
||||
if err != nil {
|
||||
if isInstanceNotFoundError(err) {
|
||||
// if host doesn't exist, no need to detach
|
||||
klog.Warningf("azureDisk - got InstanceNotFoundError(%v), DetachDisk(%s) will assume disk is already detached",
|
||||
err, diskURI)
|
||||
return nil
|
||||
}
|
||||
if retry.IsErrorRetriable(err) && c.cloud.CloudProviderBackoff {
|
||||
klog.Warningf("azureDisk - update backing off: detach disk(%s, %s), err: %v", diskName, diskURI, err)
|
||||
retryErr := kwait.ExponentialBackoff(c.cloud.RequestBackoff(), func() (bool, error) {
|
||||
c.vmLockMap.LockEntry(strings.ToLower(string(nodeName)))
|
||||
c.diskAttachDetachMap.Store(strings.ToLower(diskURI), "detaching")
|
||||
err := vmset.DetachDisk(diskName, diskURI, nodeName)
|
||||
c.diskAttachDetachMap.Delete(strings.ToLower(diskURI))
|
||||
c.vmLockMap.UnlockEntry(strings.ToLower(string(nodeName)))
|
||||
|
||||
retriable := false
|
||||
if err != nil && retry.IsErrorRetriable(err) {
|
||||
retriable = true
|
||||
retriable := false
|
||||
if err != nil && retry.IsErrorRetriable(err) {
|
||||
retriable = true
|
||||
}
|
||||
return !retriable, err
|
||||
})
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
klog.V(2).Infof("azureDisk - update abort backoff: detach disk(%s, %s), err: %v", diskName, diskURI, err)
|
||||
}
|
||||
return !retriable, err
|
||||
})
|
||||
if retryErr != nil {
|
||||
err = retryErr
|
||||
klog.V(2).Infof("azureDisk - update abort backoff: detach disk(%s, %s), err: %v", diskName, diskURI, err)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
@ -426,3 +436,8 @@ func getValidCreationData(subscriptionID, resourceGroup, sourceResourceID, sourc
|
||||
SourceResourceID: &sourceResourceID,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func isInstanceNotFoundError(err error) bool {
|
||||
errMsg := strings.ToLower(err.Error())
|
||||
return strings.Contains(errMsg, errStatusCode400) && strings.Contains(errMsg, errInvalidParameter) && strings.Contains(errMsg, errTargetInstanceIds)
|
||||
}
|
||||
|
6
vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_standard.go
generated
vendored
6
vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_standard.go
generated
vendored
@ -142,7 +142,11 @@ func (as *availabilitySet) DetachDisk(diskName, diskURI string, nodeName types.N
|
||||
(disk.ManagedDisk != nil && diskURI != "" && strings.EqualFold(*disk.ManagedDisk.ID, diskURI)) {
|
||||
// found the disk
|
||||
klog.V(2).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI)
|
||||
disks[i].ToBeDetached = to.BoolPtr(true)
|
||||
if strings.EqualFold(as.cloud.Environment.Name, "AZURESTACKCLOUD") {
|
||||
disks = append(disks[:i], disks[i+1:]...)
|
||||
} else {
|
||||
disks[i].ToBeDetached = to.BoolPtr(true)
|
||||
}
|
||||
bFoundDisk = true
|
||||
break
|
||||
}
|
||||
|
6
vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_vmss.go
generated
vendored
6
vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_vmss.go
generated
vendored
@ -147,7 +147,11 @@ func (ss *scaleSet) DetachDisk(diskName, diskURI string, nodeName types.NodeName
|
||||
(disk.ManagedDisk != nil && diskURI != "" && strings.EqualFold(*disk.ManagedDisk.ID, diskURI)) {
|
||||
// found the disk
|
||||
klog.V(2).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI)
|
||||
disks[i].ToBeDetached = to.BoolPtr(true)
|
||||
if strings.EqualFold(ss.cloud.Environment.Name, "AZURESTACKCLOUD") {
|
||||
disks = append(disks[:i], disks[i+1:]...)
|
||||
} else {
|
||||
disks[i].ToBeDetached = to.BoolPtr(true)
|
||||
}
|
||||
bFoundDisk = true
|
||||
break
|
||||
}
|
||||
|
2
vendor/k8s.io/legacy-cloud-providers/azure/azure_fakes.go
generated
vendored
2
vendor/k8s.io/legacy-cloud-providers/azure/azure_fakes.go
generated
vendored
@ -83,7 +83,7 @@ func GetTestCloud(ctrl *gomock.Controller) (az *Cloud) {
|
||||
az.VirtualMachineScaleSetsClient = mockvmssclient.NewMockInterface(ctrl)
|
||||
az.VirtualMachineScaleSetVMsClient = mockvmssvmclient.NewMockInterface(ctrl)
|
||||
az.VirtualMachinesClient = mockvmclient.NewMockInterface(ctrl)
|
||||
az.vmSet = newAvailabilitySet(az)
|
||||
az.VMSet = newAvailabilitySet(az)
|
||||
az.vmCache, _ = az.newVMCache()
|
||||
az.lbCache, _ = az.newLBCache()
|
||||
az.nsgCache, _ = az.newNSGCache()
|
||||
|
24
vendor/k8s.io/legacy-cloud-providers/azure/azure_instances.go
generated
vendored
24
vendor/k8s.io/legacy-cloud-providers/azure/azure_instances.go
generated
vendored
@ -95,7 +95,7 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N
|
||||
|
||||
// Not local instance, get addresses from Azure ARM API.
|
||||
if !isLocalInstance {
|
||||
if az.vmSet != nil {
|
||||
if az.VMSet != nil {
|
||||
return az.addressGetter(name)
|
||||
}
|
||||
|
||||
@ -168,7 +168,7 @@ func (az *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID strin
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
name, err := az.vmSet.GetNodeNameByProviderID(providerID)
|
||||
name, err := az.VMSet.GetNodeNameByProviderID(providerID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -189,7 +189,7 @@ func (az *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID stri
|
||||
return true, nil
|
||||
}
|
||||
|
||||
name, err := az.vmSet.GetNodeNameByProviderID(providerID)
|
||||
name, err := az.VMSet.GetNodeNameByProviderID(providerID)
|
||||
if err != nil {
|
||||
if err == cloudprovider.InstanceNotFound {
|
||||
return false, nil
|
||||
@ -214,7 +214,7 @@ func (az *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID st
|
||||
return false, nil
|
||||
}
|
||||
|
||||
nodeName, err := az.vmSet.GetNodeNameByProviderID(providerID)
|
||||
nodeName, err := az.VMSet.GetNodeNameByProviderID(providerID)
|
||||
if err != nil {
|
||||
// Returns false, so the controller manager will continue to check InstanceExistsByProviderID().
|
||||
if err == cloudprovider.InstanceNotFound {
|
||||
@ -224,7 +224,7 @@ func (az *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID st
|
||||
return false, err
|
||||
}
|
||||
|
||||
powerStatus, err := az.vmSet.GetPowerStatusByNodeName(string(nodeName))
|
||||
powerStatus, err := az.VMSet.GetPowerStatusByNodeName(string(nodeName))
|
||||
if err != nil {
|
||||
// Returns false, so the controller manager will continue to check InstanceExistsByProviderID().
|
||||
if err == cloudprovider.InstanceNotFound {
|
||||
@ -292,8 +292,8 @@ func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, e
|
||||
|
||||
// Not local instance, get instanceID from Azure ARM API.
|
||||
if !isLocalInstance {
|
||||
if az.vmSet != nil {
|
||||
return az.vmSet.GetInstanceIDByNodeName(nodeName)
|
||||
if az.VMSet != nil {
|
||||
return az.VMSet.GetInstanceIDByNodeName(nodeName)
|
||||
}
|
||||
|
||||
// vmSet == nil indicates credentials are not provided.
|
||||
@ -302,7 +302,7 @@ func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, e
|
||||
return az.getLocalInstanceProviderID(metadata, nodeName)
|
||||
}
|
||||
|
||||
return az.vmSet.GetInstanceIDByNodeName(nodeName)
|
||||
return az.VMSet.GetInstanceIDByNodeName(nodeName)
|
||||
}
|
||||
|
||||
func (az *Cloud) getLocalInstanceProviderID(metadata *InstanceMetadata, nodeName string) (string, error) {
|
||||
@ -342,7 +342,7 @@ func (az *Cloud) InstanceTypeByProviderID(ctx context.Context, providerID string
|
||||
return "", nil
|
||||
}
|
||||
|
||||
name, err := az.vmSet.GetNodeNameByProviderID(providerID)
|
||||
name, err := az.VMSet.GetNodeNameByProviderID(providerID)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@ -380,8 +380,8 @@ func (az *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string,
|
||||
return "", err
|
||||
}
|
||||
if !isLocalInstance {
|
||||
if az.vmSet != nil {
|
||||
return az.vmSet.GetInstanceTypeByNodeName(string(name))
|
||||
if az.VMSet != nil {
|
||||
return az.VMSet.GetInstanceTypeByNodeName(string(name))
|
||||
}
|
||||
|
||||
// vmSet == nil indicates credentials are not provided.
|
||||
@ -393,7 +393,7 @@ func (az *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string,
|
||||
}
|
||||
}
|
||||
|
||||
return az.vmSet.GetInstanceTypeByNodeName(string(name))
|
||||
return az.VMSet.GetInstanceTypeByNodeName(string(name))
|
||||
}
|
||||
|
||||
// AddSSHKeyToAllInstances adds an SSH public key as a legal identity for all instances
|
||||
|
13
vendor/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go
generated
vendored
13
vendor/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go
generated
vendored
@ -36,6 +36,7 @@ import (
|
||||
servicehelpers "k8s.io/cloud-provider/service/helpers"
|
||||
"k8s.io/klog/v2"
|
||||
azcache "k8s.io/legacy-cloud-providers/azure/cache"
|
||||
"k8s.io/legacy-cloud-providers/azure/retry"
|
||||
utilnet "k8s.io/utils/net"
|
||||
)
|
||||
|
||||
@ -216,7 +217,7 @@ func (az *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName stri
|
||||
klog.V(5).Infof("Delete service (%s): START clusterName=%q", serviceName, clusterName)
|
||||
|
||||
serviceIPToCleanup, err := az.findServiceIPAddress(ctx, clusterName, service, isInternal)
|
||||
if err != nil {
|
||||
if err != nil && !retry.HasStatusForbiddenOrIgnoredError(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -225,7 +226,7 @@ func (az *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName stri
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := az.reconcileLoadBalancer(clusterName, service, nil, false /* wantLb */); err != nil {
|
||||
if _, err := az.reconcileLoadBalancer(clusterName, service, nil, false /* wantLb */); err != nil && !retry.HasStatusForbiddenOrIgnoredError(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -258,7 +259,7 @@ func (az *Cloud) getLoadBalancerResourceGroup() string {
|
||||
func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string, nodes []*v1.Node, wantLb bool) (lb *network.LoadBalancer, status *v1.LoadBalancerStatus, exists bool, err error) {
|
||||
isInternal := requiresInternalLoadBalancer(service)
|
||||
var defaultLB *network.LoadBalancer
|
||||
primaryVMSetName := az.vmSet.GetPrimaryVMSetName()
|
||||
primaryVMSetName := az.VMSet.GetPrimaryVMSetName()
|
||||
defaultLBName := az.getAzureLoadBalancerName(clusterName, primaryVMSetName, isInternal)
|
||||
|
||||
existingLBs, err := az.ListLB(service)
|
||||
@ -329,7 +330,7 @@ func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, exi
|
||||
isInternal := requiresInternalLoadBalancer(service)
|
||||
serviceName := getServiceName(service)
|
||||
klog.V(2).Infof("selectLoadBalancer for service (%s): isInternal(%v) - start", serviceName, isInternal)
|
||||
vmSetNames, err := az.vmSet.GetVMSetNames(service, nodes)
|
||||
vmSetNames, err := az.VMSet.GetVMSetNames(service, nodes)
|
||||
if err != nil {
|
||||
klog.Errorf("az.selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - az.GetVMSetNames failed, err=(%v)", clusterName, serviceName, isInternal, err)
|
||||
return nil, false, err
|
||||
@ -935,7 +936,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
|
||||
// Remove backend pools from vmSets. This is required for virtual machine scale sets before removing the LB.
|
||||
vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName)
|
||||
klog.V(10).Infof("EnsureBackendPoolDeleted(%s,%s) for service %s: start", lbBackendPoolID, vmSetName, serviceName)
|
||||
err := az.vmSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, lb.BackendAddressPools)
|
||||
err := az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, lb.BackendAddressPools)
|
||||
if err != nil {
|
||||
klog.Errorf("EnsureBackendPoolDeleted(%s) for service %s failed: %v", lbBackendPoolID, serviceName, err)
|
||||
return nil, err
|
||||
@ -979,7 +980,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
|
||||
vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName)
|
||||
// Etag would be changed when updating backend pools, so invalidate lbCache after it.
|
||||
defer az.lbCache.Delete(lbName)
|
||||
err := az.vmSet.EnsureHostsInPool(service, nodes, lbBackendPoolID, vmSetName, isInternal)
|
||||
err := az.VMSet.EnsureHostsInPool(service, nodes, lbBackendPoolID, vmSetName, isInternal)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
8
vendor/k8s.io/legacy-cloud-providers/azure/azure_managedDiskController.go
generated
vendored
8
vendor/k8s.io/legacy-cloud-providers/azure/azure_managedDiskController.go
generated
vendored
@ -297,11 +297,15 @@ func (c *ManagedDiskController) ResizeDisk(diskURI string, oldSize resource.Quan
|
||||
return newSizeQuant, nil
|
||||
}
|
||||
|
||||
result.DiskProperties.DiskSizeGB = &requestGiB
|
||||
diskParameter := compute.DiskUpdate{
|
||||
DiskUpdateProperties: &compute.DiskUpdateProperties{
|
||||
DiskSizeGB: &requestGiB,
|
||||
},
|
||||
}
|
||||
|
||||
ctx, cancel = getContextWithCancel()
|
||||
defer cancel()
|
||||
if rerr := c.common.cloud.DisksClient.CreateOrUpdate(ctx, resourceGroup, diskName, result); rerr != nil {
|
||||
if rerr := c.common.cloud.DisksClient.Update(ctx, resourceGroup, diskName, diskParameter); rerr != nil {
|
||||
return oldSize, rerr.Error()
|
||||
}
|
||||
|
||||
|
6
vendor/k8s.io/legacy-cloud-providers/azure/azure_standard.go
generated
vendored
6
vendor/k8s.io/legacy-cloud-providers/azure/azure_standard.go
generated
vendored
@ -135,7 +135,7 @@ func (az *Cloud) getNetworkResourceSubscriptionID() string {
|
||||
func (az *Cloud) mapLoadBalancerNameToVMSet(lbName string, clusterName string) (vmSetName string) {
|
||||
vmSetName = strings.TrimSuffix(lbName, InternalLoadBalancerNameSuffix)
|
||||
if strings.EqualFold(clusterName, vmSetName) {
|
||||
vmSetName = az.vmSet.GetPrimaryVMSetName()
|
||||
vmSetName = az.VMSet.GetPrimaryVMSetName()
|
||||
}
|
||||
|
||||
return vmSetName
|
||||
@ -150,7 +150,7 @@ func (az *Cloud) getAzureLoadBalancerName(clusterName string, vmSetName string,
|
||||
clusterName = az.LoadBalancerName
|
||||
}
|
||||
lbNamePrefix := vmSetName
|
||||
if strings.EqualFold(vmSetName, az.vmSet.GetPrimaryVMSetName()) || az.useStandardLoadBalancer() {
|
||||
if strings.EqualFold(vmSetName, az.VMSet.GetPrimaryVMSetName()) || az.useStandardLoadBalancer() {
|
||||
lbNamePrefix = clusterName
|
||||
}
|
||||
if isInternal {
|
||||
@ -732,7 +732,7 @@ func (as *availabilitySet) EnsureHostInPool(service *v1.Service, nodeName types.
|
||||
return "", "", "", nil, nil
|
||||
}
|
||||
|
||||
klog.Errorf("error: az.EnsureHostInPool(%s), az.vmSet.GetPrimaryInterface.Get(%s, %s), err=%v", nodeName, vmName, vmSetName, err)
|
||||
klog.Errorf("error: az.EnsureHostInPool(%s), az.VMSet.GetPrimaryInterface.Get(%s, %s), err=%v", nodeName, vmName, vmSetName, err)
|
||||
return "", "", "", nil, err
|
||||
}
|
||||
|
||||
|
11
vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss.go
generated
vendored
11
vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss.go
generated
vendored
@ -534,17 +534,21 @@ func (ss *scaleSet) GetPrivateIPsByNodeName(nodeName string) ([]string, error) {
|
||||
|
||||
// This returns the full identifier of the primary NIC for the given VM.
|
||||
func (ss *scaleSet) getPrimaryInterfaceID(machine compute.VirtualMachineScaleSetVM) (string, error) {
|
||||
if machine.NetworkProfile == nil || machine.NetworkProfile.NetworkInterfaces == nil {
|
||||
return "", fmt.Errorf("failed to find the network interfaces for vm %s", to.String(machine.Name))
|
||||
}
|
||||
|
||||
if len(*machine.NetworkProfile.NetworkInterfaces) == 1 {
|
||||
return *(*machine.NetworkProfile.NetworkInterfaces)[0].ID, nil
|
||||
}
|
||||
|
||||
for _, ref := range *machine.NetworkProfile.NetworkInterfaces {
|
||||
if *ref.Primary {
|
||||
if to.Bool(ref.Primary) {
|
||||
return *ref.ID, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("failed to find a primary nic for the vm. vmname=%q", *machine.Name)
|
||||
return "", fmt.Errorf("failed to find a primary nic for the vm. vmname=%q", to.String(machine.Name))
|
||||
}
|
||||
|
||||
// getVmssMachineID returns the full identifier of a vmss virtual machine.
|
||||
@ -692,6 +696,9 @@ func (ss *scaleSet) listScaleSetVMs(scaleSetName, resourceGroup string) ([]compu
|
||||
allVMs, rerr := ss.VirtualMachineScaleSetVMsClient.List(ctx, resourceGroup, scaleSetName, string(compute.InstanceView))
|
||||
if rerr != nil {
|
||||
klog.Errorf("VirtualMachineScaleSetVMsClient.List failed: %v", rerr)
|
||||
if rerr.IsNotFound() {
|
||||
return nil, cloudprovider.InstanceNotFound
|
||||
}
|
||||
return nil, rerr.Error()
|
||||
}
|
||||
|
||||
|
6
vendor/k8s.io/legacy-cloud-providers/azure/azure_zones.go
generated
vendored
6
vendor/k8s.io/legacy-cloud-providers/azure/azure_zones.go
generated
vendored
@ -87,7 +87,7 @@ func (az *Cloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) {
|
||||
if err != nil {
|
||||
return cloudprovider.Zone{}, fmt.Errorf("failure getting hostname from kernel")
|
||||
}
|
||||
return az.vmSet.GetZoneByNodeName(strings.ToLower(hostname))
|
||||
return az.VMSet.GetZoneByNodeName(strings.ToLower(hostname))
|
||||
}
|
||||
|
||||
// GetZoneByProviderID implements Zones.GetZoneByProviderID
|
||||
@ -104,7 +104,7 @@ func (az *Cloud) GetZoneByProviderID(ctx context.Context, providerID string) (cl
|
||||
return cloudprovider.Zone{}, nil
|
||||
}
|
||||
|
||||
nodeName, err := az.vmSet.GetNodeNameByProviderID(providerID)
|
||||
nodeName, err := az.VMSet.GetNodeNameByProviderID(providerID)
|
||||
if err != nil {
|
||||
return cloudprovider.Zone{}, err
|
||||
}
|
||||
@ -126,5 +126,5 @@ func (az *Cloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeName)
|
||||
return cloudprovider.Zone{}, nil
|
||||
}
|
||||
|
||||
return az.vmSet.GetZoneByNodeName(string(nodeName))
|
||||
return az.VMSet.GetZoneByNodeName(string(nodeName))
|
||||
}
|
||||
|
1
vendor/k8s.io/legacy-cloud-providers/azure/clients/diskclient/BUILD
generated
vendored
1
vendor/k8s.io/legacy-cloud-providers/azure/clients/diskclient/BUILD
generated
vendored
@ -19,6 +19,7 @@ go_library(
|
||||
"//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute:go_default_library",
|
||||
"//vendor/github.com/Azure/go-autorest/autorest:go_default_library",
|
||||
"//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library",
|
||||
"//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library",
|
||||
"//vendor/k8s.io/klog/v2:go_default_library",
|
||||
],
|
||||
)
|
||||
|
193
vendor/k8s.io/legacy-cloud-providers/azure/clients/diskclient/azure_diskclient.go
generated
vendored
193
vendor/k8s.io/legacy-cloud-providers/azure/clients/diskclient/azure_diskclient.go
generated
vendored
@ -20,12 +20,14 @@ package diskclient
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
"github.com/Azure/go-autorest/autorest/to"
|
||||
|
||||
"k8s.io/client-go/util/flowcontrol"
|
||||
"k8s.io/klog/v2"
|
||||
@ -204,6 +206,74 @@ func (c *Client) createOrUpdateResponder(resp *http.Response) (*compute.Disk, *r
|
||||
return result, retry.GetError(resp, err)
|
||||
}
|
||||
|
||||
// Update creates or updates a Disk.
|
||||
func (c *Client) Update(ctx context.Context, resourceGroupName string, diskName string, diskParameter compute.DiskUpdate) *retry.Error {
|
||||
mc := metrics.NewMetricContext("disks", "update", resourceGroupName, c.subscriptionID, "")
|
||||
|
||||
// Report errors if the client is rate limited.
|
||||
if !c.rateLimiterWriter.TryAccept() {
|
||||
mc.RateLimitedCount()
|
||||
return retry.GetRateLimitError(true, "DiskUpdate")
|
||||
}
|
||||
|
||||
// Report errors if the client is throttled.
|
||||
if c.RetryAfterWriter.After(time.Now()) {
|
||||
mc.ThrottledCount()
|
||||
rerr := retry.GetThrottlingError("DiskUpdate", "client throttled", c.RetryAfterWriter)
|
||||
return rerr
|
||||
}
|
||||
|
||||
rerr := c.updateDisk(ctx, resourceGroupName, diskName, diskParameter)
|
||||
mc.Observe(rerr.Error())
|
||||
if rerr != nil {
|
||||
if rerr.IsThrottled() {
|
||||
// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
|
||||
c.RetryAfterWriter = rerr.RetryAfter
|
||||
}
|
||||
|
||||
return rerr
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateDisk updates a Disk.
|
||||
func (c *Client) updateDisk(ctx context.Context, resourceGroupName string, diskName string, diskParameter compute.DiskUpdate) *retry.Error {
|
||||
resourceID := armclient.GetResourceID(
|
||||
c.subscriptionID,
|
||||
resourceGroupName,
|
||||
"Microsoft.Compute/disks",
|
||||
diskName,
|
||||
)
|
||||
|
||||
response, rerr := c.armClient.PatchResource(ctx, resourceID, diskParameter)
|
||||
defer c.armClient.CloseResponse(ctx, response)
|
||||
if rerr != nil {
|
||||
klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "disk.put.request", resourceID, rerr.Error())
|
||||
return rerr
|
||||
}
|
||||
|
||||
if response != nil && response.StatusCode != http.StatusNoContent {
|
||||
_, rerr = c.updateResponder(response)
|
||||
if rerr != nil {
|
||||
klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "disk.put.respond", resourceID, rerr.Error())
|
||||
return rerr
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Client) updateResponder(resp *http.Response) (*compute.Disk, *retry.Error) {
|
||||
result := &compute.Disk{}
|
||||
err := autorest.Respond(
|
||||
resp,
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
|
||||
autorest.ByUnmarshallingJSON(&result))
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return result, retry.GetError(resp, err)
|
||||
}
|
||||
|
||||
// Delete deletes a Disk by name.
|
||||
func (c *Client) Delete(ctx context.Context, resourceGroupName string, diskName string) *retry.Error {
|
||||
mc := metrics.NewMetricContext("disks", "delete", resourceGroupName, c.subscriptionID, "")
|
||||
@ -246,3 +316,126 @@ func (c *Client) deleteDisk(ctx context.Context, resourceGroupName string, diskN
|
||||
|
||||
return c.armClient.DeleteResource(ctx, resourceID, "")
|
||||
}
|
||||
|
||||
// ListByResourceGroup lists all the disks under a resource group.
|
||||
func (c *Client) ListByResourceGroup(ctx context.Context, resourceGroupName string) ([]compute.Disk, *retry.Error) {
|
||||
resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks",
|
||||
autorest.Encode("path", c.subscriptionID),
|
||||
autorest.Encode("path", resourceGroupName))
|
||||
|
||||
result := make([]compute.Disk, 0)
|
||||
page := &DiskListPage{}
|
||||
page.fn = c.listNextResults
|
||||
|
||||
resp, rerr := c.armClient.GetResource(ctx, resourceID, "")
|
||||
defer c.armClient.CloseResponse(ctx, resp)
|
||||
if rerr != nil {
|
||||
klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "disk.list.request", resourceID, rerr.Error())
|
||||
return result, rerr
|
||||
}
|
||||
|
||||
var err error
|
||||
page.dl, err = c.listResponder(resp)
|
||||
if err != nil {
|
||||
klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "disk.list.respond", resourceID, err)
|
||||
return result, retry.GetError(resp, err)
|
||||
}
|
||||
|
||||
for page.NotDone() {
|
||||
result = append(result, *page.Response().Value...)
|
||||
if err = page.NextWithContext(ctx); err != nil {
|
||||
klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "disk.list.next", resourceID, err)
|
||||
return result, retry.GetError(page.Response().Response.Response, err)
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// listNextResults retrieves the next set of results, if any.
|
||||
func (c *Client) listNextResults(ctx context.Context, lastResults compute.DiskList) (result compute.DiskList, err error) {
|
||||
req, err := c.diskListPreparer(ctx, lastResults)
|
||||
if err != nil {
|
||||
return result, autorest.NewErrorWithError(err, "diskclient", "listNextResults", nil, "Failure preparing next results request")
|
||||
}
|
||||
if req == nil {
|
||||
return
|
||||
}
|
||||
|
||||
resp, rerr := c.armClient.Send(ctx, req)
|
||||
defer c.armClient.CloseResponse(ctx, resp)
|
||||
if rerr != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return result, autorest.NewErrorWithError(rerr.Error(), "diskclient", "listNextResults", resp, "Failure sending next results request")
|
||||
}
|
||||
|
||||
result, err = c.listResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "diskclient", "listNextResults", resp, "Failure responding to next results request")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// listResponder handles the response to the List request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (c *Client) listResponder(resp *http.Response) (result compute.DiskList, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
|
||||
func (c *Client) diskListPreparer(ctx context.Context, lr compute.DiskList) (*http.Request, error) {
|
||||
if lr.NextLink == nil || len(to.String(lr.NextLink)) < 1 {
|
||||
return nil, nil
|
||||
}
|
||||
return autorest.Prepare((&http.Request{}).WithContext(ctx),
|
||||
autorest.AsJSON(),
|
||||
autorest.AsGet(),
|
||||
autorest.WithBaseURL(to.String(lr.NextLink)))
|
||||
}
|
||||
|
||||
// DiskListPage contains a page of Disk values.
//
// fn is called by NextWithContext to fetch the page that follows dl;
// dl holds the most recently fetched page.
type DiskListPage struct {
	fn func(context.Context, compute.DiskList) (compute.DiskList, error)
	dl compute.DiskList
}
|
||||
|
||||
// NextWithContext advances to the next page of values. If there was an error making
|
||||
// the request the page does not advance and the error is returned.
|
||||
func (page *DiskListPage) NextWithContext(ctx context.Context) (err error) {
|
||||
next, err := page.fn(ctx, page.dl)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
page.dl = next
|
||||
return nil
|
||||
}
|
||||
|
||||
// Next advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (page *DiskListPage) Next() error {
	return page.NextWithContext(context.Background())
}
|
||||
|
||||
// NotDone returns true if the page enumeration should be started or is not yet complete.
|
||||
func (page DiskListPage) NotDone() bool {
|
||||
return !page.dl.IsEmpty()
|
||||
}
|
||||
|
||||
// Response returns the raw server response from the last page request.
func (page DiskListPage) Response() compute.DiskList {
	return page.dl
}
|
||||
|
||||
// Values returns the slice of values for the current page or nil if there are no values.
|
||||
func (page DiskListPage) Values() []compute.Disk {
|
||||
if page.dl.IsEmpty() {
|
||||
return nil
|
||||
}
|
||||
return *page.dl.Value
|
||||
}
|
||||
|
6
vendor/k8s.io/legacy-cloud-providers/azure/clients/diskclient/interface.go
generated
vendored
6
vendor/k8s.io/legacy-cloud-providers/azure/clients/diskclient/interface.go
generated
vendored
@ -40,6 +40,12 @@ type Interface interface {
|
||||
// CreateOrUpdate creates or updates a Disk.
|
||||
CreateOrUpdate(ctx context.Context, resourceGroupName string, diskName string, diskParameter compute.Disk) *retry.Error
|
||||
|
||||
// Update updates a Disk.
|
||||
Update(ctx context.Context, resourceGroupName string, diskName string, diskParameter compute.DiskUpdate) *retry.Error
|
||||
|
||||
// Delete deletes a Disk by name.
|
||||
Delete(ctx context.Context, resourceGroupName string, diskName string) *retry.Error
|
||||
|
||||
// ListByResourceGroup lists all the disks under a resource group.
|
||||
ListByResourceGroup(ctx context.Context, resourceGroupName string) ([]compute.Disk, *retry.Error)
|
||||
}
|
||||
|
@ -79,6 +79,20 @@ func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, disk
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, diskName, diskParameter)
|
||||
}
|
||||
|
||||
// Update mocks base method
|
||||
func (m *MockInterface) Update(ctx context.Context, resourceGroupName, diskName string, diskParameter compute.DiskUpdate) *retry.Error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "Update", ctx, resourceGroupName, diskName, diskParameter)
|
||||
ret0, _ := ret[0].(*retry.Error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// Update indicates an expected call of Update
|
||||
func (mr *MockInterfaceMockRecorder) Update(ctx, resourceGroupName, diskName, diskParameter interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockInterface)(nil).Update), ctx, resourceGroupName, diskName, diskParameter)
|
||||
}
|
||||
|
||||
// Delete mocks base method
|
||||
func (m *MockInterface) Delete(ctx context.Context, resourceGroupName, diskName string) *retry.Error {
|
||||
m.ctrl.T.Helper()
|
||||
@ -92,3 +106,18 @@ func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, diskName int
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, diskName)
|
||||
}
|
||||
|
||||
// ListByResourceGroup mocks base method
|
||||
func (m *MockInterface) ListByResourceGroup(ctx context.Context, resourceGroupName string) ([]compute.Disk, *retry.Error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ListByResourceGroup", ctx, resourceGroupName)
|
||||
ret0, _ := ret[0].([]compute.Disk)
|
||||
ret1, _ := ret[1].(*retry.Error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// ListByResourceGroup indicates an expected call of ListByResourceGroup
|
||||
func (mr *MockInterfaceMockRecorder) ListByResourceGroup(ctx, resourceGroupName interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByResourceGroup", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName)
|
||||
}
|
||||
|
1
vendor/k8s.io/legacy-cloud-providers/azure/metrics/azure_metrics.go
generated
vendored
1
vendor/k8s.io/legacy-cloud-providers/azure/metrics/azure_metrics.go
generated
vendored
@ -86,6 +86,7 @@ func registerAPIMetrics(attributes ...string) *apiCallMetrics {
|
||||
&metrics.HistogramOpts{
|
||||
Name: "cloudprovider_azure_api_request_duration_seconds",
|
||||
Help: "Latency of an Azure API call",
|
||||
Buckets: []float64{.1, .25, .5, 1, 2.5, 5, 10, 15, 25, 50, 120, 300, 600, 1200},
|
||||
StabilityLevel: metrics.ALPHA,
|
||||
},
|
||||
attributes,
|
||||
|
26
vendor/k8s.io/legacy-cloud-providers/azure/retry/azure_error.go
generated
vendored
26
vendor/k8s.io/legacy-cloud-providers/azure/retry/azure_error.go
generated
vendored
@ -89,6 +89,15 @@ func (err *Error) IsThrottled() bool {
|
||||
return err.HTTPStatusCode == http.StatusTooManyRequests || err.RetryAfter.After(now())
|
||||
}
|
||||
|
||||
// IsNotFound returns true the if the requested object wasn't found
|
||||
func (err *Error) IsNotFound() bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return err.HTTPStatusCode == http.StatusNotFound
|
||||
}
|
||||
|
||||
// NewError creates a new Error.
|
||||
func NewError(retriable bool, err error) *Error {
|
||||
return &Error{
|
||||
@ -286,3 +295,20 @@ func IsErrorRetriable(err error) bool {
|
||||
|
||||
return strings.Contains(err.Error(), "Retriable: true")
|
||||
}
|
||||
|
||||
// HasStatusForbiddenOrIgnoredError return true if the given error code is part of the error message
|
||||
// This should only be used when trying to delete resources
|
||||
func HasStatusForbiddenOrIgnoredError(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if strings.Contains(err.Error(), fmt.Sprintf("HTTPStatusCode: %d", http.StatusNotFound)) {
|
||||
return true
|
||||
}
|
||||
|
||||
if strings.Contains(err.Error(), fmt.Sprintf("HTTPStatusCode: %d", http.StatusForbidden)) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
4
vendor/k8s.io/legacy-cloud-providers/vsphere/vsphere.go
generated
vendored
4
vendor/k8s.io/legacy-cloud-providers/vsphere/vsphere.go
generated
vendored
@ -1500,7 +1500,7 @@ func (vs *VSphere) SecretAdded(obj interface{}) {
|
||||
return
|
||||
}
|
||||
|
||||
klog.V(4).Infof("secret added: %+v", obj)
|
||||
klog.V(4).Infof("refreshing node cache for secret: %s/%s", secret.Namespace, secret.Name)
|
||||
vs.refreshNodesForSecretChange()
|
||||
}
|
||||
|
||||
@ -1524,7 +1524,7 @@ func (vs *VSphere) SecretUpdated(obj interface{}, newObj interface{}) {
|
||||
return
|
||||
}
|
||||
|
||||
klog.V(4).Infof("secret updated: %+v", newObj)
|
||||
klog.V(4).Infof("refreshing node cache for secret: %s/%s", secret.Namespace, secret.Name)
|
||||
vs.refreshNodesForSecretChange()
|
||||
}
|
||||
|
||||
|
93
vendor/modules.txt
vendored
93
vendor/modules.txt
vendored
@ -1339,7 +1339,7 @@ gopkg.in/warnings.v0
|
||||
gopkg.in/yaml.v2
|
||||
# gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776
|
||||
gopkg.in/yaml.v3
|
||||
# k8s.io/api v0.19.0 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.19.2-k3s1
|
||||
# k8s.io/api v0.19.0 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.19.3-k3s1
|
||||
## explicit
|
||||
k8s.io/api/admission/v1
|
||||
k8s.io/api/admission/v1beta1
|
||||
@ -1385,7 +1385,7 @@ k8s.io/api/settings/v1alpha1
|
||||
k8s.io/api/storage/v1
|
||||
k8s.io/api/storage/v1alpha1
|
||||
k8s.io/api/storage/v1beta1
|
||||
# k8s.io/apiextensions-apiserver v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.19.2-k3s1
|
||||
# k8s.io/apiextensions-apiserver v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.19.3-k3s1
|
||||
k8s.io/apiextensions-apiserver/pkg/apihelpers
|
||||
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
|
||||
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install
|
||||
@ -1425,7 +1425,7 @@ k8s.io/apiextensions-apiserver/pkg/generated/openapi
|
||||
k8s.io/apiextensions-apiserver/pkg/registry/customresource
|
||||
k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor
|
||||
k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition
|
||||
# k8s.io/apimachinery v0.19.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.19.2-k3s1
|
||||
# k8s.io/apimachinery v0.19.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.19.3-k3s1
|
||||
## explicit
|
||||
k8s.io/apimachinery/pkg/api/equality
|
||||
k8s.io/apimachinery/pkg/api/errors
|
||||
@ -1489,7 +1489,7 @@ k8s.io/apimachinery/pkg/watch
|
||||
k8s.io/apimachinery/third_party/forked/golang/json
|
||||
k8s.io/apimachinery/third_party/forked/golang/netutil
|
||||
k8s.io/apimachinery/third_party/forked/golang/reflect
|
||||
# k8s.io/apiserver v0.19.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.19.2-k3s1
|
||||
# k8s.io/apiserver v0.19.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.19.3-k3s1
|
||||
## explicit
|
||||
k8s.io/apiserver/pkg/admission
|
||||
k8s.io/apiserver/pkg/admission/configuration
|
||||
@ -1617,7 +1617,7 @@ k8s.io/apiserver/plugin/pkg/audit/webhook
|
||||
k8s.io/apiserver/plugin/pkg/authenticator/token/oidc
|
||||
k8s.io/apiserver/plugin/pkg/authenticator/token/webhook
|
||||
k8s.io/apiserver/plugin/pkg/authorizer/webhook
|
||||
# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.19.2-k3s1
|
||||
# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.19.3-k3s1
|
||||
k8s.io/cli-runtime/pkg/genericclioptions
|
||||
k8s.io/cli-runtime/pkg/kustomize
|
||||
k8s.io/cli-runtime/pkg/kustomize/k8sdeps
|
||||
@ -1630,7 +1630,7 @@ k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch
|
||||
k8s.io/cli-runtime/pkg/kustomize/k8sdeps/validator
|
||||
k8s.io/cli-runtime/pkg/printers
|
||||
k8s.io/cli-runtime/pkg/resource
|
||||
# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.19.2-k3s1
|
||||
# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.19.3-k3s1
|
||||
## explicit
|
||||
k8s.io/client-go/discovery
|
||||
k8s.io/client-go/discovery/cached
|
||||
@ -1869,7 +1869,7 @@ k8s.io/client-go/util/jsonpath
|
||||
k8s.io/client-go/util/keyutil
|
||||
k8s.io/client-go/util/retry
|
||||
k8s.io/client-go/util/workqueue
|
||||
# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.19.2-k3s1
|
||||
# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.19.3-k3s1
|
||||
## explicit
|
||||
k8s.io/cloud-provider
|
||||
k8s.io/cloud-provider/api
|
||||
@ -1882,13 +1882,13 @@ k8s.io/cloud-provider/service/helpers
|
||||
k8s.io/cloud-provider/volume
|
||||
k8s.io/cloud-provider/volume/errors
|
||||
k8s.io/cloud-provider/volume/helpers
|
||||
# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.19.2-k3s1
|
||||
# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.19.3-k3s1
|
||||
k8s.io/cluster-bootstrap/token/api
|
||||
k8s.io/cluster-bootstrap/token/jws
|
||||
k8s.io/cluster-bootstrap/token/util
|
||||
k8s.io/cluster-bootstrap/util/secrets
|
||||
k8s.io/cluster-bootstrap/util/tokens
|
||||
# k8s.io/code-generator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.19.2-k3s1
|
||||
# k8s.io/code-generator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.19.3-k3s1
|
||||
k8s.io/code-generator/cmd/client-gen/args
|
||||
k8s.io/code-generator/cmd/client-gen/generators
|
||||
k8s.io/code-generator/cmd/client-gen/generators/fake
|
||||
@ -1903,7 +1903,7 @@ k8s.io/code-generator/cmd/lister-gen/args
|
||||
k8s.io/code-generator/cmd/lister-gen/generators
|
||||
k8s.io/code-generator/pkg/namer
|
||||
k8s.io/code-generator/pkg/util
|
||||
# k8s.io/component-base v0.19.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.19.2-k3s1
|
||||
# k8s.io/component-base v0.19.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.19.3-k3s1
|
||||
## explicit
|
||||
k8s.io/component-base/cli/flag
|
||||
k8s.io/component-base/cli/globalflag
|
||||
@ -1927,11 +1927,11 @@ k8s.io/component-base/metrics/testutil
|
||||
k8s.io/component-base/term
|
||||
k8s.io/component-base/version
|
||||
k8s.io/component-base/version/verflag
|
||||
# k8s.io/cri-api v0.19.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.19.2-k3s1
|
||||
# k8s.io/cri-api v0.19.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.19.3-k3s1
|
||||
## explicit
|
||||
k8s.io/cri-api/pkg/apis
|
||||
k8s.io/cri-api/pkg/apis/runtime/v1alpha2
|
||||
# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.19.2-k3s1
|
||||
# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.19.3-k3s1
|
||||
k8s.io/csi-translation-lib
|
||||
k8s.io/csi-translation-lib/plugins
|
||||
# k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14
|
||||
@ -1949,7 +1949,7 @@ k8s.io/heapster/metrics/api/v1/types
|
||||
k8s.io/klog
|
||||
# k8s.io/klog/v2 v2.2.0
|
||||
k8s.io/klog/v2
|
||||
# k8s.io/kube-aggregator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.19.2-k3s1
|
||||
# k8s.io/kube-aggregator v0.18.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.19.3-k3s1
|
||||
k8s.io/kube-aggregator/pkg/apis/apiregistration
|
||||
k8s.io/kube-aggregator/pkg/apis/apiregistration/install
|
||||
k8s.io/kube-aggregator/pkg/apis/apiregistration/v1
|
||||
@ -1977,7 +1977,7 @@ k8s.io/kube-aggregator/pkg/controllers/status
|
||||
k8s.io/kube-aggregator/pkg/registry/apiservice
|
||||
k8s.io/kube-aggregator/pkg/registry/apiservice/etcd
|
||||
k8s.io/kube-aggregator/pkg/registry/apiservice/rest
|
||||
# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.19.2-k3s1
|
||||
# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.19.3-k3s1
|
||||
k8s.io/kube-controller-manager/config/v1alpha1
|
||||
# k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6
|
||||
k8s.io/kube-openapi/pkg/aggregator
|
||||
@ -1988,13 +1988,13 @@ k8s.io/kube-openapi/pkg/schemaconv
|
||||
k8s.io/kube-openapi/pkg/util
|
||||
k8s.io/kube-openapi/pkg/util/proto
|
||||
k8s.io/kube-openapi/pkg/util/proto/validation
|
||||
# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.19.2-k3s1
|
||||
# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.19.3-k3s1
|
||||
k8s.io/kube-proxy/config/v1alpha1
|
||||
# k8s.io/kube-scheduler v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.19.2-k3s1
|
||||
# k8s.io/kube-scheduler v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.19.3-k3s1
|
||||
k8s.io/kube-scheduler/config/v1
|
||||
k8s.io/kube-scheduler/config/v1beta1
|
||||
k8s.io/kube-scheduler/extender/v1
|
||||
# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.19.2-k3s1
|
||||
# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.19.3-k3s1
|
||||
k8s.io/kubectl/pkg/apps
|
||||
k8s.io/kubectl/pkg/cmd
|
||||
k8s.io/kubectl/pkg/cmd/annotate
|
||||
@ -2070,11 +2070,11 @@ k8s.io/kubectl/pkg/util/storage
|
||||
k8s.io/kubectl/pkg/util/templates
|
||||
k8s.io/kubectl/pkg/util/term
|
||||
k8s.io/kubectl/pkg/validation
|
||||
# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.19.2-k3s1
|
||||
# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.19.3-k3s1
|
||||
k8s.io/kubelet/config/v1beta1
|
||||
k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1
|
||||
k8s.io/kubelet/pkg/apis/pluginregistration/v1
|
||||
# k8s.io/kubernetes v1.19.0 => github.com/rancher/kubernetes v1.19.2-k3s1
|
||||
# k8s.io/kubernetes v1.19.3 => github.com/rancher/kubernetes v1.19.3-k3s1
|
||||
## explicit
|
||||
k8s.io/kubernetes/cmd/cloud-controller-manager/app
|
||||
k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config
|
||||
@ -2095,7 +2095,6 @@ k8s.io/kubernetes/cmd/kube-scheduler/app/config
|
||||
k8s.io/kubernetes/cmd/kube-scheduler/app/options
|
||||
k8s.io/kubernetes/cmd/kubelet/app
|
||||
k8s.io/kubernetes/cmd/kubelet/app/options
|
||||
k8s.io/kubernetes/pkg/api/endpoints
|
||||
k8s.io/kubernetes/pkg/api/legacyscheme
|
||||
k8s.io/kubernetes/pkg/api/persistentvolume
|
||||
k8s.io/kubernetes/pkg/api/persistentvolumeclaim
|
||||
@ -2814,7 +2813,7 @@ k8s.io/kubernetes/third_party/forked/gonum/graph
|
||||
k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear
|
||||
k8s.io/kubernetes/third_party/forked/gonum/graph/simple
|
||||
k8s.io/kubernetes/third_party/forked/gonum/graph/traverse
|
||||
# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.19.2-k3s1
|
||||
# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.19.3-k3s1
|
||||
k8s.io/legacy-cloud-providers/aws
|
||||
k8s.io/legacy-cloud-providers/azure
|
||||
k8s.io/legacy-cloud-providers/azure/auth
|
||||
@ -2856,7 +2855,7 @@ k8s.io/legacy-cloud-providers/openstack
|
||||
k8s.io/legacy-cloud-providers/vsphere
|
||||
k8s.io/legacy-cloud-providers/vsphere/vclib
|
||||
k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers
|
||||
# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.19.2-k3s1
|
||||
# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.19.3-k3s1
|
||||
k8s.io/metrics/pkg/apis/custom_metrics
|
||||
k8s.io/metrics/pkg/apis/custom_metrics/v1beta1
|
||||
k8s.io/metrics/pkg/apis/custom_metrics/v1beta2
|
||||
@ -2952,29 +2951,29 @@ vbom.ml/util/sortorder
|
||||
# google.golang.org/genproto => google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63
|
||||
# google.golang.org/grpc => google.golang.org/grpc v1.26.0
|
||||
# gopkg.in/square/go-jose.v2 => gopkg.in/square/go-jose.v2 v2.2.2
|
||||
# k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.19.2-k3s1
|
||||
# k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.19.2-k3s1
|
||||
# k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.19.2-k3s1
|
||||
# k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.19.2-k3s1
|
||||
# k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.19.2-k3s1
|
||||
# k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.19.2-k3s1
|
||||
# k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.19.2-k3s1
|
||||
# k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.19.2-k3s1
|
||||
# k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.19.2-k3s1
|
||||
# k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.19.2-k3s1
|
||||
# k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.19.2-k3s1
|
||||
# k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.19.2-k3s1
|
||||
# k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.19.2-k3s1
|
||||
# k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.19.2-k3s1
|
||||
# k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.19.2-k3s1
|
||||
# k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.19.2-k3s1
|
||||
# k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.19.2-k3s1
|
||||
# k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.19.2-k3s1
|
||||
# k8s.io/kubernetes => github.com/rancher/kubernetes v1.19.2-k3s1
|
||||
# k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.19.2-k3s1
|
||||
# k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.19.2-k3s1
|
||||
# k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.19.2-k3s1
|
||||
# k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.19.2-k3s1
|
||||
# k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.19.2-k3s1
|
||||
# k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.19.2-k3s1
|
||||
# k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.19.3-k3s1
|
||||
# k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.19.3-k3s1
|
||||
# k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.19.3-k3s1
|
||||
# k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.19.3-k3s1
|
||||
# k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.19.3-k3s1
|
||||
# k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.19.3-k3s1
|
||||
# k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.19.3-k3s1
|
||||
# k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.19.3-k3s1
|
||||
# k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.19.3-k3s1
|
||||
# k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.19.3-k3s1
|
||||
# k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.19.3-k3s1
|
||||
# k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.19.3-k3s1
|
||||
# k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.19.3-k3s1
|
||||
# k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.19.3-k3s1
|
||||
# k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.19.3-k3s1
|
||||
# k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.19.3-k3s1
|
||||
# k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.19.3-k3s1
|
||||
# k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.19.3-k3s1
|
||||
# k8s.io/kubernetes => github.com/rancher/kubernetes v1.19.3-k3s1
|
||||
# k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.19.3-k3s1
|
||||
# k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.19.3-k3s1
|
||||
# k8s.io/node-api => github.com/rancher/kubernetes/staging/src/k8s.io/node-api v1.19.3-k3s1
|
||||
# k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.19.3-k3s1
|
||||
# k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.19.3-k3s1
|
||||
# k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.19.3-k3s1
|
||||
# mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34
|
||||
|
Loading…
Reference in New Issue
Block a user