Update Kubernetes to v1.22.2-k3s1
Signed-off-by: Brad Davidson <brad.davidson@rancher.com>
This commit is contained in: parent eda65b19d9, commit 77dfdda909
82 go.mod
@@ -25,7 +25,7 @@ replace (
github.com/kubernetes-sigs/cri-tools => github.com/k3s-io/cri-tools v1.21.0-k3s1
github.com/matryer/moq => github.com/rancher/moq v0.0.0-20190404221404-ee5226d43009
// LOOK TO scripts/download FOR THE VERSION OF runc THAT WE ARE BUILDING/SHIPPING
github.com/opencontainers/runc => github.com/opencontainers/runc v1.0.1
github.com/opencontainers/runc => github.com/opencontainers/runc v1.0.2
github.com/opencontainers/runtime-spec => github.com/opencontainers/runtime-spec v1.0.3-0.20210316141917-a8c4a9ee0f6b
github.com/rancher/k3s/pkg/data => ./pkg/data
github.com/rancher/wrangler => github.com/rancher/wrangler v0.8.5
@@ -39,37 +39,37 @@ replace (
google.golang.org/genproto => google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63
google.golang.org/grpc => google.golang.org/grpc v1.38.0
gopkg.in/square/go-jose.v2 => gopkg.in/square/go-jose.v2 v2.2.2
k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.22.1-k3s1
k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.22.1-k3s1
k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.22.1-k3s1
k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.22.1-k3s1
k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.22.1-k3s1
k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.22.1-k3s1
k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.22.1-k3s1
k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.22.1-k3s1
k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.22.1-k3s1
k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.22.1-k3s1
k8s.io/component-helpers => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.22.1-k3s1
k8s.io/controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.22.1-k3s1
k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.22.1-k3s1
k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.22.1-k3s1
k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.22.2-k3s1
k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.22.2-k3s1
k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.22.2-k3s1
k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.22.2-k3s1
k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.22.2-k3s1
k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.22.2-k3s1
k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.22.2-k3s1
k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.22.2-k3s1
k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.22.2-k3s1
k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.22.2-k3s1
k8s.io/component-helpers => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.22.2-k3s1
k8s.io/controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.22.2-k3s1
k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.22.2-k3s1
k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.22.2-k3s1
k8s.io/klog => github.com/k3s-io/klog v1.0.0-k3s1 // k3s-release-1.x
k8s.io/klog/v2 => github.com/k3s-io/klog/v2 v2.9.0-k3s1 // k3s-main
k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.22.1-k3s1
k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.22.1-k3s1
k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.22.1-k3s1
k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.22.1-k3s1
k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.22.1-k3s1
k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.22.1-k3s1
k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.22.1-k3s1
k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.22.1-k3s1
k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.22.1-k3s1
k8s.io/mount-utils => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.22.1-k3s1
k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.22.1-k3s1
k8s.io/pod-security-admission => github.com/k3s-io/kubernetes/staging/src/k8s.io/pod-security-admission v1.22.1-k3s1
k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.22.1-k3s1
k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.22.1-k3s1
k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.22.1-k3s1
k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.22.2-k3s1
k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.22.2-k3s1
k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.22.2-k3s1
k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.22.2-k3s1
k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.22.2-k3s1
k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.22.2-k3s1
k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.22.2-k3s1
k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.22.2-k3s1
k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.22.2-k3s1
k8s.io/mount-utils => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.22.2-k3s1
k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.22.2-k3s1
k8s.io/pod-security-admission => github.com/k3s-io/kubernetes/staging/src/k8s.io/pod-security-admission v1.22.2-k3s1
k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.22.2-k3s1
k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.22.2-k3s1
k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.22.2-k3s1
mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7
)
@@ -102,7 +102,7 @@ require (
github.com/onsi/ginkgo v1.16.4
github.com/onsi/gomega v1.11.0
// LOOK TO scripts/download FOR THE VERSION OF runc THAT WE ARE BUILDING/SHIPPING
github.com/opencontainers/runc v1.0.1
github.com/opencontainers/runc v1.0.2
github.com/opencontainers/selinux v1.8.2
github.com/pierrec/lz4 v2.6.0+incompatible
github.com/pkg/errors v0.9.1
@@ -128,18 +128,18 @@ require (
google.golang.org/grpc v1.40.0
gopkg.in/yaml.v2 v2.4.0
inet.af/tcpproxy v0.0.0-20200125044825-b6bb9b5b8252
k8s.io/api v0.22.1
k8s.io/apimachinery v0.22.1
k8s.io/apiserver v0.22.1
k8s.io/api v0.22.2
k8s.io/apimachinery v0.22.2
k8s.io/apiserver v0.22.2
k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible
k8s.io/cloud-provider v0.22.1
k8s.io/component-base v0.22.1
k8s.io/controller-manager v0.22.1 // indirect
k8s.io/cri-api v0.22.1
k8s.io/cloud-provider v0.22.2
k8s.io/component-base v0.22.2
k8s.io/controller-manager v0.22.2 // indirect
k8s.io/cri-api v0.22.2
k8s.io/klog v1.0.0
k8s.io/klog/v2 v2.9.0
k8s.io/kubectl v0.22.1
k8s.io/kubernetes v1.22.1
k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9
k8s.io/kubectl v0.22.2
k8s.io/kubernetes v1.22.2
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a
sigs.k8s.io/yaml v1.2.0
)
109 go.sum
@@ -577,57 +577,57 @@ github.com/k3s-io/klog v1.0.0-k3s1 h1:Bg+gRta3s4sfbaYUSWbHcMEyVdxdaU1cJCRtWcaxjB
github.com/k3s-io/klog v1.0.0-k3s1/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
github.com/k3s-io/klog/v2 v2.9.0-k3s1 h1:q4DqcZgBG+D2TSTGx6JcP1cuXeoxSoixSLE3casDcSw=
github.com/k3s-io/klog/v2 v2.9.0-k3s1/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
github.com/k3s-io/kubernetes v1.22.1-k3s1 h1:evi+JkMnC0HDEJ8A7aLqPKZ+0cP/CK6udFquf7KVq1I=
github.com/k3s-io/kubernetes v1.22.1-k3s1/go.mod h1:IGQZrV02n2IBp52+/YwLVMurCEQPKXJ/k8hU3mqEOuA=
github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.22.1-k3s1 h1:BlTOjlJwCV9ZNgTgyByLJXn7emlDAQC/duN14pUtWLk=
github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.22.1-k3s1/go.mod h1:IpPnJRE5t3olVaut5p67N16cZkWwwU5KVFM35xCKyxM=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.22.1-k3s1 h1:yKGX4MWbCTLY+6a4Lm2v1xyljWUUQGDVdzGIwcJ4SsE=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.22.1-k3s1/go.mod h1:jTgFcW8xltjKznImgnPThxdONRgdN7N6TCjeDBpp8Ac=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.22.1-k3s1 h1:bPJYlbNE9JVqi6ouXztSypFsq4Q66OPJRKIbSRgmos4=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.22.1-k3s1/go.mod h1:J+pXqtTbzz3Sg3WGrq6bFtKQ2D9xbnRY3VdVaX0lJjw=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.22.1-k3s1 h1:5q8/FQ3isrKt2zGg4ctB3NoC/gqoi14xsUuDovylxtg=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.22.1-k3s1/go.mod h1:IpeVKsnwInYf7NCGiePI9VMXcHHAlBiZIgvNiMAkyAw=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.22.1-k3s1 h1:NObYOsaMwn4ewrRML6hL5BW39V7w9B6W96Re592R8qo=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.22.1-k3s1/go.mod h1:/G/EQBeZVcKQ6+aqZhYUZUB7zh13XkshYpnX4m75hGM=
github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.22.1-k3s1 h1:sEoUpwfH2ozun0qMWd4xI8tbYXe07//InC21nO457Ac=
github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.22.1-k3s1/go.mod h1:n1bRYCJq9UxbItQh0czjVsV9jyPrw3eyuVcldthrIAs=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.22.1-k3s1 h1:Hihe3HHkxVFFE7EU+he2ybvD6o4S7Qr/Ru8XQY9Y2Wk=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.22.1-k3s1/go.mod h1:S4VzMEga23uK7wdtJ7kpdYChDEwtcWiJF90jKDJ4IU0=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.22.1-k3s1 h1:Ze2YXbZnF3XD0jJ6Nq5J3NG2oC4Nb8ahj/H9QFAxZcw=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.22.1-k3s1/go.mod h1:ppZJmhTukDTa5g/F0ksVMLM0Owbi9GeKhzuTXAVVJig=
github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.22.1-k3s1 h1:oooqmzI86gx6W9hfVVxRlCupYeJuSP+8LHel5vaR8m8=
github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.22.1-k3s1/go.mod h1:sUUmwtmwhRVMXuCc1xDCW0VAqgY6LwabgtXcWxMBL8Y=
github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.22.1-k3s1 h1:lOiUEvqElLPnmt33tTCgU/yIb0g9sYxj2ggegMdvQvc=
github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.22.1-k3s1/go.mod h1:o9dIqwQ1nmWNusj8M3G0ftwE9y/7wLG1UuOB+6oIZLQ=
github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.22.1-k3s1 h1:kjGiogzGTFeCGpVKO/X2UDm+Q8/Mz+0Sbgl/QeU4F1U=
github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.22.1-k3s1/go.mod h1:tUxhhYUyZYWYN+8Rh4NKWdwOAxu3iWNjd3JR/TeI7yM=
github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.22.1-k3s1 h1:EG8Mo8KLMCanQcYQDJMZHX3RpDfepTOjGdt4g5Wm58A=
github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.22.1-k3s1/go.mod h1:BzM6LWVd6MwkVvqBMldlMljM9XqrmfpR4ZCTu8AEF24=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.22.1-k3s1 h1:BtWhKdbchZvHxq8Bx3hW4QR8PRB+DSUOXypxDAw5wMU=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.22.1-k3s1/go.mod h1:2XpB3zC24SSO5mhWTzWwBHrO5rO19ncH1bdqS+VuQsI=
github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.22.1-k3s1 h1:P3tfVLaNYQfApsyyMoNNRXiUJ7F6U4WStO1waHFNnYY=
github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.22.1-k3s1/go.mod h1:B1gPUSbK2PVSnkxCgw/fmDckzQU6UCuyl670XFbEw6Q=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.22.1-k3s1 h1:R2RxG/bkYNMnKf7aGDhxn34SrYKYRK91cXSA3YyHaH4=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.22.1-k3s1/go.mod h1:JTBHh8x0LVirQ89mRvXz9rHS8Ej264096lXoyJu4uyE=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.22.1-k3s1 h1:hIM86VXBs+67JZlv0hZa6ojjQeR+52MPwSpO9yaeBYA=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.22.1-k3s1/go.mod h1:46iKO45TZat/zvPyqe8TjLLrTS/U/nGB92Ft63PEPF0=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.22.1-k3s1 h1:WmKsy3kAHq35FzBqXrYlARaPcGd42Sd8LttyYrZYHvM=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.22.1-k3s1/go.mod h1:6mEp02ABsuOeeBuUrrol78v9LYysX7Z8CZOMFlkPOOI=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.22.1-k3s1 h1:lGZLZL1dJRJa9kbFaM7wIAtXoApcla4zzrLUFVZbWmE=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.22.1-k3s1/go.mod h1:xZnfOrGTta6rB9IWNKl82yzWKpMSUXVmyGHRilQ9kzM=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.22.1-k3s1 h1:XVyPTFpVqil7FHXZpD9yOdo9hl84i4CeutyQqFjlgKE=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.22.1-k3s1/go.mod h1:bdkJAgdbfEwcH39xZympq6yCtuf9jgrAZfnwIohROao=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.22.1-k3s1 h1:ReUUO/fkRek24Os4NIAzDiu7B7WP+Rk6DI5V/jpZDa4=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.22.1-k3s1/go.mod h1:ykhJBi1kXSUeYSTzHe2F6A8nDAfF2jjClsagmgX96vk=
github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.22.1-k3s1 h1:+4D8jZy/+JU52nTUU9Ko8tSlA50oqhjLh+h5wei+Ltw=
github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.22.1-k3s1/go.mod h1:4u5WY4rN5o2F3Zq+LAdz7X2tSnpFinnFk75JfIuhQP8=
github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.22.1-k3s1 h1:hGxCDmTwu2B+Cazh1zWMgDAHYwieUwiH5sc0D5Dtv8I=
github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.22.1-k3s1/go.mod h1:I5RbQZ+gj12KSgWzMyHaE0hudGajvT/Nc5jRE/WMJnI=
github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.22.1-k3s1 h1:vmtN99At+RRyDOiIciCYfnp4X34D54hQ/tFxNRvnwdE=
github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.22.1-k3s1/go.mod h1:lea5OqpOLnvnHJSArsNVLNXGWjQ8I5dWOAfUGZcwG0Q=
github.com/k3s-io/kubernetes/staging/src/k8s.io/pod-security-admission v1.22.1-k3s1 h1:FUdS9NJfFIywAoONqeUVE56BUA9Y4k7QC3dD9o2Kz9Q=
github.com/k3s-io/kubernetes/staging/src/k8s.io/pod-security-admission v1.22.1-k3s1/go.mod h1:MfbK6LD+Nhyzoy2TEg4jjcicjhy8UQp9IXrCxLIJhAE=
github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.22.1-k3s1/go.mod h1:mpLHq04wAiOpaWE4BI8ArSQp82DIgRirioGL6CryJDg=
github.com/k3s-io/kubernetes v1.22.2-k3s1 h1:A69iFa6z4k8M7s9B/LHgnu2aag58tjV69sYtJBuqRUQ=
github.com/k3s-io/kubernetes v1.22.2-k3s1/go.mod h1:Snea7fgIObGgHmLbUJ3OgjGEr5bjj16iEdp5oHS6eS8=
github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.22.2-k3s1 h1:J+a4JAvA1+jYnngUil3lF7BtyvNKfWDiVUyOhxqdBAA=
github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.22.2-k3s1/go.mod h1:IpPnJRE5t3olVaut5p67N16cZkWwwU5KVFM35xCKyxM=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.22.2-k3s1 h1:GbRNFSXK1PesH5Ux/WpxQ4Yp4sUUy9RjIoOW9qqnW24=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.22.2-k3s1/go.mod h1:QmWu0gjtyJOtpuSADyICOuml8CoD/poBm+IV/jrT4D8=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.22.2-k3s1 h1:NqDJnV90/4FB0cKGXnP0utV8oxTz95N/f5+Lh6XVzmM=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.22.2-k3s1/go.mod h1:J+pXqtTbzz3Sg3WGrq6bFtKQ2D9xbnRY3VdVaX0lJjw=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.22.2-k3s1 h1:1/DtVGve5WhbCL3KP4mU12HQkyXtdep43FDuORo8Jy0=
github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.22.2-k3s1/go.mod h1:LGIW+JHWnonf/5jG9JRDEXxsZsbTKcC/4aMljJaz8bA=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.22.2-k3s1 h1:INmjrqx7+XkbPAVr84AhizEMMsB0lPMhPLGEYAolXJM=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.22.2-k3s1/go.mod h1:/G/EQBeZVcKQ6+aqZhYUZUB7zh13XkshYpnX4m75hGM=
github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.22.2-k3s1 h1:UM/RWHDq3ZbsdmdEu06AjctGr2Ww3XEIGYmVBHV/x8s=
github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.22.2-k3s1/go.mod h1:O2pnHGDR/pgPeefYRaZj7oAOfA3WukxKhGFzkGhyIME=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.22.2-k3s1 h1:ARy/1tHPTDzvlr45R/IkzhDHQhC4GXgQneCnR86qkyM=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.22.2-k3s1/go.mod h1:YfjUcxHPiB9x/eHUrBtefZ61AuHGSDXfyXtsLS5UlMQ=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.22.2-k3s1 h1:6qI++O0pH63txyDfxWj6D21R3d2uR49hCiB0i3xgWxk=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.22.2-k3s1/go.mod h1:ppZJmhTukDTa5g/F0ksVMLM0Owbi9GeKhzuTXAVVJig=
github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.22.2-k3s1 h1:OU8umkoSuLinfcfTgRqkP8Mb4XDNVADHk7yP5kI6zwc=
github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.22.2-k3s1/go.mod h1:sUUmwtmwhRVMXuCc1xDCW0VAqgY6LwabgtXcWxMBL8Y=
github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.22.2-k3s1 h1:c+R2WWtt/a5ZLbC8GVoN5Pdwsxr+EKDvWufSpIJHW+s=
github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.22.2-k3s1/go.mod h1:cn9EB9A1wujtKWsHqB9lkYq8FL4dUuftmiqNyXIQEmE=
github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.22.2-k3s1 h1:7BBJpO1wiS6WY2FCget1vZ9h1hXHWf0Y2oXumG14jLU=
github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.22.2-k3s1/go.mod h1:9Bx6HezI9sKzn5Boasw7vMT8FRgcXsExOoT87Wzdls4=
github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.22.2-k3s1 h1:++sVCT1BR5ZeItCdq2crZSkD0ynNvyeRou8vMpyq8PM=
github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.22.2-k3s1/go.mod h1:aPin+82yKPEirDGBtNS/4fcc3a1QVOqdt6zzxOlrfc8=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.22.2-k3s1 h1:WM4SeVYP+fj+9ORQzOTwHPy861ADReV0EVogflZ7bNU=
github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.22.2-k3s1/go.mod h1:2XpB3zC24SSO5mhWTzWwBHrO5rO19ncH1bdqS+VuQsI=
github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.22.2-k3s1 h1:QgqhsuRN3ivJ4R1/uUH8hdsysKdR+iRb33tKcFRCIT4=
github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.22.2-k3s1/go.mod h1:B1gPUSbK2PVSnkxCgw/fmDckzQU6UCuyl670XFbEw6Q=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.22.2-k3s1 h1:D3S3p909hpE8cqZwsH0TIgT9fiySuaEFEgHKOakT3N8=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.22.2-k3s1/go.mod h1:LyGGFXi1A101JZQjBNy4RgVgAwtQ5UhRy2KfX/STctk=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.22.2-k3s1 h1:fRqlgH8KDoK+lN4UTfXy+LP5c+TNcSg/kKrO9mw9CeY=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.22.2-k3s1/go.mod h1:46iKO45TZat/zvPyqe8TjLLrTS/U/nGB92Ft63PEPF0=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.22.2-k3s1 h1:PImWWdl0iCo3xvpDnGYUZfl1RiCIl0g+n2QG3TrandA=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.22.2-k3s1/go.mod h1:6mEp02ABsuOeeBuUrrol78v9LYysX7Z8CZOMFlkPOOI=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.22.2-k3s1 h1:ac8sZvCj41wkqCNa2j4AyOsqP/ywOc95m8rPV/L0abg=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.22.2-k3s1/go.mod h1:xZnfOrGTta6rB9IWNKl82yzWKpMSUXVmyGHRilQ9kzM=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.22.2-k3s1 h1:e1B+AysTfIYLlZsbBhqUCW75cchf/KmigbGyF8NQwOA=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.22.2-k3s1/go.mod h1:ekgaQk/DhXcVFh7pMs0S8QU7uSGhC5XXcmX2MfMz+PQ=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.22.2-k3s1 h1:3pr4xfbUIY6Gf/RmU9Ezr2OOlgjOoq91vHWXeRq2PmE=
github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.22.2-k3s1/go.mod h1:ykhJBi1kXSUeYSTzHe2F6A8nDAfF2jjClsagmgX96vk=
github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.22.2-k3s1 h1:8w6/NVnbutMl4UErTVMoksmOixADXiV+hTyb2Mf+M58=
github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.22.2-k3s1/go.mod h1:X8EaUY5K2IM/62KAMuHGuHyOhsJwvsoRwdvsyWjm++g=
github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.22.2-k3s1 h1:Ky+O7SBEzCDyIcm9wh29c5w7IMQtA4HVYp/OrNL2ZEc=
github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.22.2-k3s1/go.mod h1:I5RbQZ+gj12KSgWzMyHaE0hudGajvT/Nc5jRE/WMJnI=
github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.22.2-k3s1 h1:5J+ZWv5YCyMarcK18lCfsVQUHW2jP4cunnxMDQHPWMI=
github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.22.2-k3s1/go.mod h1:7UvmmOyjKl2RW0tgpT4l1z7dxVV4TMnAAlSN95cpUeM=
github.com/k3s-io/kubernetes/staging/src/k8s.io/pod-security-admission v1.22.2-k3s1 h1:42oVOEuwyrYDuVw1RKzc78Q3iTjzh3wwWI5Hk85/Rbw=
github.com/k3s-io/kubernetes/staging/src/k8s.io/pod-security-admission v1.22.2-k3s1/go.mod h1:NQr24M7bou27uJhKTCgiz8L/BbJm2G9ZjUQOmO1ZkDE=
github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.22.2-k3s1/go.mod h1:mpLHq04wAiOpaWE4BI8ArSQp82DIgRirioGL6CryJDg=
github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw=
github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
@@ -799,8 +799,8 @@ github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3I
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.2-0.20210819154149-5ad6f50d6283 h1:TVzvdjOalkJBNkbpPVMAr4KV9QRf2IjfxdyxwAK78Gs=
github.com/opencontainers/image-spec v1.0.2-0.20210819154149-5ad6f50d6283/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/runc v1.0.1 h1:G18PGckGdAm3yVQRWDVQ1rLSLntiniKJ0cNRT2Tm5gs=
github.com/opencontainers/runc v1.0.1/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
github.com/opencontainers/runc v1.0.2 h1:opHZMaswlyxz1OuGpBE53Dwe4/xF7EZTY0A2L/FpCOg=
github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
github.com/opencontainers/runtime-spec v1.0.3-0.20210316141917-a8c4a9ee0f6b h1:ZDY8P/luqXqGJSNCux8+9GeKmBDS+JVgVuIwKTauiwM=
github.com/opencontainers/runtime-spec v1.0.3-0.20210316141917-a8c4a9ee0f6b/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
@@ -1309,8 +1309,9 @@ k8s.io/system-validators v1.5.0/go.mod h1:bPldcLgkIUK22ALflnsXk8pvkTEndYdNuaHH6g
k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9 h1:imL9YgXQ9p7xmPzHFm/vVd/cF78jad+n4wK1ABwYtMM=
k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a h1:8dYfu/Fc9Gz2rNJKB9IQRGgQOh2clmRzNIPPY1xLY5g=
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
157 vendor/github.com/opencontainers/runc/.cirrus.yml generated vendored Normal file
@@ -0,0 +1,157 @@
---
# We use Cirrus for Vagrant tests and native CentOS 7 and 8, because macOS
# instances of GHA are too slow and flaky, and Linux instances of GHA do not
# support KVM.

# NOTE Cirrus execution environments lack a terminal, needed for
# some integration tests. So we use `ssh -tt` command to fake a terminal.

task:
timeout_in: 30m

env:
DEBIAN_FRONTEND: noninteractive
HOME: /root
# yamllint disable rule:key-duplicates
matrix:
DISTRO: fedora34

name: vagrant DISTRO:$DISTRO

compute_engine_instance:
image_project: cirrus-images
image: family/docker-kvm
platform: linux
nested_virtualization: true
# CPU limit: `16 / NTASK`: see https://cirrus-ci.org/faq/#are-there-any-limits
cpu: 8
# Memory limit: `4GB * NCPU`
memory: 32G

host_info_script: |
uname -a
echo "-----"
cat /etc/os-release
echo "-----"
cat /proc/cpuinfo
echo "-----"
df -T
install_libvirt_vagrant_script: |
apt-get update
apt-get install -y libvirt-daemon libvirt-daemon-system vagrant vagrant-libvirt
systemctl enable --now libvirtd
vagrant_cache:
fingerprint_script: uname -s ; cat Vagrantfile.$DISTRO
folder: /root/.vagrant.d
vagrant_up_script: |
ln -sf Vagrantfile.$DISTRO Vagrantfile
# Retry if it fails (download.fedoraproject.org returns 404 sometimes)
vagrant up || vagrant up
mkdir -p -m 0700 /root/.ssh
vagrant ssh-config >> /root/.ssh/config
guest_info_script: |
ssh default 'sh -exc "uname -a && systemctl --version && df -T && cat /etc/os-release"'
unit_tests_script: |
ssh default 'sudo -i make -C /vagrant localunittest'
integration_systemd_script: |
ssh -tt default "sudo -i make -C /vagrant localintegration RUNC_USE_SYSTEMD=yes"
integration_fs_script: |
ssh -tt default "sudo -i make -C /vagrant localintegration"
integration_systemd_rootless_script: |
if [ $DISTRO == centos7 ]; then
echo "SKIP: integration_systemd_rootless_script requires cgroup v2"
else
ssh -tt default "sudo -i make -C /vagrant localrootlessintegration RUNC_USE_SYSTEMD=yes"
fi
integration_fs_rootless_script: |
if [ $DISTRO == centos7 ]; then
echo "SKIP: FIXME: integration_fs_rootless_script is skipped because of EPERM on writing cgroup.procs"
else
ssh -tt default "sudo -i make -C /vagrant localrootlessintegration"
fi

task:
timeout_in: 30m

env:
HOME: /root
CIRRUS_WORKING_DIR: /home/runc
GO_VERSION: "1.16.6"
BATS_VERSION: "v1.3.0"
# yamllint disable rule:key-duplicates
matrix:
DISTRO: centos-7
DISTRO: centos-stream-8

name: ci / $DISTRO

compute_engine_instance:
image_project: centos-cloud
image: family/$DISTRO
platform: linux
cpu: 4
memory: 8G

install_dependencies_script: |
yum install -y -q epel-release
case $DISTRO in
centos-7)
(cd /etc/yum.repos.d && curl -O https://copr.fedorainfracloud.org/coprs/adrian/criu-el7/repo/epel-7/adrian-criu-el7-epel-7.repo)
# sysctl
echo "user.max_user_namespaces=15076" > /etc/sysctl.d/userns.conf
sysctl --system
;;
centos-stream-8)
yum install -y -q dnf-plugins-core
yum config-manager --set-enabled powertools
;;
esac
yum install -y -q gcc git iptables jq glibc-static libseccomp-devel make criu
# install Go
curl -fsSL "https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz" | tar Cxz /usr/local
# install bats
cd /tmp
git clone https://github.com/bats-core/bats-core
cd bats-core
git checkout $BATS_VERSION
./install.sh /usr/local
cd -
# Add a user for rootless tests
useradd -u2000 -m -d/home/rootless -s/bin/bash rootless
# set PATH
echo 'export PATH=/usr/local/go/bin:/usr/local/bin:$PATH' >> /root/.bashrc
# Setup ssh localhost for terminal emulation (script -e did not work)
ssh-keygen -t ed25519 -f /root/.ssh/id_ed25519 -N ""
cat /root/.ssh/id_ed25519.pub >> /root/.ssh/authorized_keys
chmod 400 /root/.ssh/authorized_keys
ssh-keyscan localhost >> /root/.ssh/known_hosts
echo -e "Host localhost\n\tStrictHostKeyChecking no\t\nIdentityFile /root/.ssh/id_ed25519\n" >> /root/.ssh/config
sed -e "s,PermitRootLogin.*,PermitRootLogin prohibit-password,g" -i /etc/ssh/sshd_config
systemctl restart sshd
host_info_script: |
uname -a
echo "-----"
cat /etc/os-release
echo "-----"
cat /proc/cpuinfo
echo "-----"
df -T
echo "-----"
systemctl --version
unit_tests_script: |
ssh -tt localhost "make -C /home/runc localunittest"
integration_systemd_script: |
ssh -tt localhost "make -C /home/runc localintegration RUNC_USE_SYSTEMD=yes"
integration_fs_script: |
ssh -tt localhost "make -C /home/runc localintegration"
integration_systemd_rootless_script: |
echo "SKIP: integration_systemd_rootless_script requires cgroup v2"
integration_fs_rootless_script: |
case $DISTRO in
centos-7)
echo "SKIP: FIXME: integration_fs_rootless_script is skipped because of EPERM on writing cgroup.procs"
;;
centos-stream-8)
ssh -tt localhost "make -C /home/runc localrootlessintegration"
;;
esac
2 vendor/github.com/opencontainers/runc/Makefile generated vendored
@@ -27,7 +27,7 @@ endif
GO_BUILD := $(GO) build -trimpath $(MOD_VENDOR) $(GO_BUILDMODE) $(EXTRA_FLAGS) -tags "$(BUILDTAGS)" \
-ldflags "-X main.gitCommit=$(COMMIT) -X main.version=$(VERSION) $(EXTRA_LDFLAGS)"
GO_BUILD_STATIC := CGO_ENABLED=1 $(GO) build -trimpath $(MOD_VENDOR) $(EXTRA_FLAGS) -tags "$(BUILDTAGS) netgo osusergo" \
-ldflags "-w -extldflags -static -X main.gitCommit=$(COMMIT) -X main.version=$(VERSION) $(EXTRA_LDFLAGS)"
-ldflags "-extldflags -static -X main.gitCommit=$(COMMIT) -X main.version=$(VERSION) $(EXTRA_LDFLAGS)"

.DEFAULT: runc
2 vendor/github.com/opencontainers/runc/VERSION generated vendored
@@ -1 +1 @@
1.0.1
1.0.2
52 vendor/github.com/opencontainers/runc/Vagrantfile.centos7 generated vendored
@@ -1,52 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

Vagrant.configure("2") do |config|
config.vm.box = "centos/7"
config.vm.provider :virtualbox do |v|
v.memory = 2048
v.cpus = 2
end
config.vm.provider :libvirt do |v|
v.memory = 2048
v.cpus = 2
end
config.vm.provision "shell", inline: <<-SHELL
set -e -u -o pipefail

# configuration
GO_VERSION="1.16.4"
BATS_VERSION="v1.3.0"

# install yum packages
yum install -y -q epel-release
(cd /etc/yum.repos.d && curl -O https://copr.fedorainfracloud.org/coprs/adrian/criu-el7/repo/epel-7/adrian-criu-el7-epel-7.repo)
yum install -y -q gcc git iptables jq glibc-static libseccomp-devel make criu
yum clean all

# install Go
curl -fsSL "https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz" | tar Cxz /usr/local

# install bats
git clone https://github.com/bats-core/bats-core
cd bats-core
git checkout $BATS_VERSION
./install.sh /usr/local
cd ..
rm -rf bats-core

# set PATH (NOTE: sudo without -i ignores this PATH)
cat >> /etc/profile.d/sh.local <<EOF
PATH=/usr/local/go/bin:/usr/local/bin:$PATH
export PATH
EOF
source /etc/profile.d/sh.local

# sysctl
echo "user.max_user_namespaces=15076" > /etc/sysctl.d/userns.conf
sysctl --system

# Add a user for rootless tests
useradd -u2000 -m -d/home/rootless -s/bin/bash rootless
SHELL
end
24 vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go generated vendored
@@ -4,6 +4,7 @@ package fs

import (
"bufio"
"errors"
"fmt"
"os"
"strconv"
@@ -11,6 +12,7 @@ import (
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
"github.com/opencontainers/runc/libcontainer/configs"
"golang.org/x/sys/unix"
)

type CpuGroup struct{}
@@ -71,15 +73,33 @@ func (s *CpuGroup) Set(path string, r *configs.Resources) error {
return fmt.Errorf("the minimum allowed cpu-shares is %d", sharesRead)
}
}

var period string
if r.CpuPeriod != 0 {
if err := cgroups.WriteFile(path, "cpu.cfs_period_us", strconv.FormatUint(r.CpuPeriod, 10)); err != nil {
return err
period = strconv.FormatUint(r.CpuPeriod, 10)
if err := cgroups.WriteFile(path, "cpu.cfs_period_us", period); err != nil {
// Sometimes when the period to be set is smaller
// than the current one, it is rejected by the kernel
// (EINVAL) as old_quota/new_period exceeds the parent
// cgroup quota limit. If this happens and the quota is
// going to be set, ignore the error for now and retry
// after setting the quota.
if !errors.Is(err, unix.EINVAL) || r.CpuQuota == 0 {
return err
}
} else {
period = ""
}
}
if r.CpuQuota != 0 {
if err := cgroups.WriteFile(path, "cpu.cfs_quota_us", strconv.FormatInt(r.CpuQuota, 10)); err != nil {
return err
}
if period != "" {
if err := cgroups.WriteFile(path, "cpu.cfs_period_us", period); err != nil {
return err
}
}
}
return s.SetRtSched(path, r)
}
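A note on the cpu.go hunk above: when the new cfs_period_us is lower than the current one, the kernel may reject the write with EINVAL because the existing quota divided by the new period would exceed the parent cgroup's limit, so runc v1.0.2 tolerates that error and rewrites the period after the quota has been lowered. A minimal standalone sketch of the same ordering, with a hypothetical writeFile helper standing in for runc's cgroups.WriteFile and an invented cgroup path:

package main

import (
    "errors"
    "fmt"
    "os"
    "path/filepath"
    "strconv"

    "golang.org/x/sys/unix"
)

// writeFile is a stand-in for runc's cgroups.WriteFile.
func writeFile(dir, name, data string) error {
    return os.WriteFile(filepath.Join(dir, name), []byte(data), 0o644)
}

// setCPULimits mirrors the v1.0.2 ordering: if writing the period fails with
// EINVAL while a quota is also being set, defer the period write and retry
// it after the quota write has made room for it.
func setCPULimits(dir string, period uint64, quota int64) error {
    var pending string
    if period != 0 {
        pending = strconv.FormatUint(period, 10)
        if err := writeFile(dir, "cpu.cfs_period_us", pending); err != nil {
            if !errors.Is(err, unix.EINVAL) || quota == 0 {
                return err
            }
            // Keep pending set so the period write is retried below.
        } else {
            pending = ""
        }
    }
    if quota != 0 {
        if err := writeFile(dir, "cpu.cfs_quota_us", strconv.FormatInt(quota, 10)); err != nil {
            return err
        }
        if pending != "" {
            return writeFile(dir, "cpu.cfs_period_us", pending)
        }
    }
    return nil
}

func main() {
    // Hypothetical cgroup v1 directory; requires privileges on a real system.
    if err := setCPULimits("/sys/fs/cgroup/cpu/demo", 50000, 25000); err != nil {
        fmt.Println("set limits:", err)
    }
}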
12 vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/common.go generated vendored
@@ -310,6 +310,14 @@ func getUnitName(c *configs.Cgroup) string {
return c.Name
}

// This code should be in sync with getUnitName.
func getUnitType(unitName string) string {
if strings.HasSuffix(unitName, ".slice") {
return "Slice"
}
return "Scope"
}

// isDbusError returns true if the error is a specific dbus error.
func isDbusError(err error, name string) bool {
if err != nil {
@@ -388,10 +396,10 @@ func resetFailedUnit(cm *dbusConnManager, name string) {
}
}

func getUnitProperty(cm *dbusConnManager, unitName string, propertyName string) (*systemdDbus.Property, error) {
func getUnitTypeProperty(cm *dbusConnManager, unitName string, unitType string, propertyName string) (*systemdDbus.Property, error) {
var prop *systemdDbus.Property
err := cm.retryOnDisconnect(func(c *systemdDbus.Conn) (Err error) {
prop, Err = c.GetUnitPropertyContext(context.TODO(), unitName, propertyName)
prop, Err = c.GetUnitTypePropertyContext(context.TODO(), unitName, unitType, propertyName)
return Err
})
return prop, err
25 vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd/v1.go generated vendored
@@ -6,6 +6,7 @@ import (
"errors"
"os"
"path/filepath"
"reflect"
"strings"
"sync"

@@ -345,6 +346,11 @@ func (m *legacyManager) freezeBeforeSet(unitName string, r *configs.Resources) (
// Special case for SkipDevices, as used by Kubernetes to create pod
// cgroups with allow-all device policy).
if r.SkipDevices {
if r.SkipFreezeOnSet {
// Both needsFreeze and needsThaw are false.
return
}

// No need to freeze if SkipDevices is set, and either
// (1) systemd unit does not (yet) exist, or
// (2) it has DevicePolicy=auto and empty DeviceAllow list.
@@ -353,15 +359,20 @@ func (m *legacyManager) freezeBeforeSet(unitName string, r *configs.Resources) (
// a non-existent unit returns default properties,
// and settings in (2) are the defaults.
//
// Do not return errors from getUnitProperty, as they alone
// Do not return errors from getUnitTypeProperty, as they alone
// should not prevent Set from working.
devPolicy, e := getUnitProperty(m.dbus, unitName, "DevicePolicy")

unitType := getUnitType(unitName)

devPolicy, e := getUnitTypeProperty(m.dbus, unitName, unitType, "DevicePolicy")
if e == nil && devPolicy.Value == dbus.MakeVariant("auto") {
devAllow, e := getUnitProperty(m.dbus, unitName, "DeviceAllow")
if e == nil && devAllow.Value == dbus.MakeVariant([]deviceAllowEntry{}) {
needsFreeze = false
needsThaw = false
return
devAllow, e := getUnitTypeProperty(m.dbus, unitName, unitType, "DeviceAllow")
if e == nil {
if rv := reflect.ValueOf(devAllow.Value.Value()); rv.Kind() == reflect.Slice && rv.Len() == 0 {
needsFreeze = false
needsThaw = false
return
}
}
}
}
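Why the DeviceAllow check above went through reflect: comparing against dbus.MakeVariant([]deviceAllowEntry{}) only matches a variant holding exactly that slice type, while systemd can report the property as an empty slice of a different element type. The new code accepts any zero-length slice. A self-contained sketch of that idiom (the types here are simplified stand-ins, not runc's):

package main

import (
    "fmt"
    "reflect"
)

// isEmptySlice reports whether v holds a slice of any element type with
// length zero - the property runc v1.0.2 actually cares about.
func isEmptySlice(v interface{}) bool {
    rv := reflect.ValueOf(v)
    return rv.Kind() == reflect.Slice && rv.Len() == 0
}

func main() {
    type deviceAllowEntry struct{ Path, Perms string }
    fmt.Println(isEmptySlice([]deviceAllowEntry{}))   // true
    fmt.Println(isEmptySlice([]string{}))             // true: different element type, still empty
    fmt.Println(isEmptySlice([]deviceAllowEntry{{}})) // false: non-empty
    fmt.Println(isEmptySlice(42))                     // false: not a slice
}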
12 vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go generated vendored
@@ -131,4 +131,16 @@ type Resources struct {
//
// NOTE it is impossible to start a container which has this flag set.
SkipDevices bool `json:"-"`

// SkipFreezeOnSet is a flag for cgroup manager to skip the cgroup
// freeze when setting resources. Only applicable to systemd legacy
// (i.e. cgroup v1) manager (which uses freeze by default to avoid
// spurious permission errors caused by systemd inability to update
// device rules in a non-disruptive manner).
//
// If not set, a few methods (such as looking into cgroup's
// devices.list and querying the systemd unit properties) are used
// during Set() to figure out whether the freeze is required. Those
// methods may be relatively slow, thus this flag.
SkipFreezeOnSet bool `json:"-"`
}
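The two flags are consumed together: a caller that creates pod-style cgroups with an allow-all device policy can set both and let the legacy systemd manager skip the freeze/thaw cycle entirely. A sketch of how a caller might populate them (the import path is real; the scenario is illustrative):

package main

import (
    "fmt"

    "github.com/opencontainers/runc/libcontainer/configs"
)

func main() {
    // Pod-level cgroup with DevicePolicy=auto and no device rules to protect:
    // freezing before Set would only add latency, so opt out of it.
    r := &configs.Resources{
        SkipDevices:     true,
        SkipFreezeOnSet: true,
    }
    fmt.Printf("SkipDevices=%v SkipFreezeOnSet=%v\n", r.SkipDevices, r.SkipFreezeOnSet)
}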
14 vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c generated vendored
@@ -142,7 +142,7 @@ int setns(int fd, int nstype)

static void write_log(const char *level, const char *format, ...)
{
char *message = NULL, *stage = NULL;
char *message = NULL, *stage = NULL, *json = NULL;
va_list args;
int ret;

@@ -164,11 +164,21 @@ static void write_log(const char *level, const char *format, ...)
if (ret < 0)
goto out;

dprintf(logfd, "{\"level\":\"%s\", \"msg\": \"%s[%d]: %s\"}\n", level, stage, getpid(), message);
ret = asprintf(&json, "{\"level\":\"%s\", \"msg\": \"%s[%d]: %s\"}\n", level, stage, getpid(), message);
if (ret < 0) {
json = NULL;
goto out;
}

/* This logging is on a best-effort basis. In case of a short or failed
 * write there is nothing we can do, so just ignore write() errors.
 */
ssize_t __attribute__((unused)) __res = write(logfd, json, ret);

out:
free(message);
free(stage);
free(json);
}

/* XXX: This is ugly. */
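The nsexec.c hunk swaps dprintf(), which may hit the log fd with several partial writes, for asprintf() plus a single write(), so every JSON record lands in one syscall and concurrent writers cannot interleave mid-line. A Go analogue of the same pattern (the names here are hypothetical, not runc's API):

package main

import (
    "fmt"
    "os"
)

// logLine builds one complete JSON record first and emits it with a single
// Write call, mirroring the asprintf+write approach. Write errors are
// deliberately ignored: this logging is best-effort.
func logLine(f *os.File, level, stage string, pid int, msg string) {
    line := fmt.Sprintf("{\"level\":%q, \"msg\": %q}\n",
        level, fmt.Sprintf("%s[%d]: %s", stage, pid, msg))
    _, _ = f.Write([]byte(line)) // one syscall per record, no interleaving
}

func main() {
    logLine(os.Stderr, "debug", "nsexec-1", os.Getpid(), "example message")
}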
21 vendor/github.com/opencontainers/runc/libcontainer/seccomp/seccomp_linux.go generated vendored
@@ -67,7 +67,7 @@ func InitSeccomp(config *configs.Seccomp) error {
if call == nil {
return errors.New("encountered nil syscall while initializing Seccomp")
}
if err := matchCall(filter, call); err != nil {
if err := matchCall(filter, call, defaultAction); err != nil {
return err
}
}
@@ -142,7 +142,7 @@ func getCondition(arg *configs.Arg) (libseccomp.ScmpCondition, error) {
}

// Add a rule to match a single syscall
func matchCall(filter *libseccomp.ScmpFilter, call *configs.Syscall) error {
func matchCall(filter *libseccomp.ScmpFilter, call *configs.Syscall, defAct libseccomp.ScmpAction) error {
if call == nil || filter == nil {
return errors.New("cannot use nil as syscall to block")
}
@@ -151,6 +151,17 @@ func matchCall(filter *libseccomp.ScmpFilter, call *configs.Syscall) error {
return errors.New("empty string is not a valid syscall")
}

// Convert the call's action to the libseccomp equivalent
callAct, err := getAction(call.Action, call.ErrnoRet)
if err != nil {
return fmt.Errorf("action in seccomp profile is invalid: %w", err)
}
if callAct == defAct {
// This rule is redundant, silently skip it
// to avoid error from AddRule.
return nil
}

// If we can't resolve the syscall, assume it's not supported on this kernel
// Ignore it, don't error out
callNum, err := libseccomp.GetSyscallFromName(call.Name)
@@ -158,12 +169,6 @@ func matchCall(filter *libseccomp.ScmpFilter, call *configs.Syscall) error {
return nil
}

// Convert the call's action to the libseccomp equivalent
callAct, err := getAction(call.Action, call.ErrnoRet)
if err != nil {
return fmt.Errorf("action in seccomp profile is invalid: %s", err)
}

// Unconditional match - just add the rule
if len(call.Args) == 0 {
if err := filter.AddRule(callNum, callAct); err != nil {
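The guard added to matchCall exists because libseccomp rejects a rule whose action equals the filter's default action, so runc now resolves the action before the syscall lookup and silently drops such redundant rules. A minimal sketch of the guard with simplified stand-in types (the real code uses libseccomp-golang's ScmpFilter and ScmpAction):

package main

import "fmt"

// action stands in for libseccomp's ScmpAction.
type action string

// addRule mimics matchCall's early return: a rule whose action matches the
// filter default is a no-op, so skip it instead of letting AddRule error out.
func addRule(defaultAction action, syscallName string, callAction action) {
    if callAction == defaultAction {
        fmt.Printf("skip %s: action %q equals the filter default\n", syscallName, callAction)
        return
    }
    fmt.Printf("add rule: %s -> %q\n", syscallName, callAction)
}

func main() {
    def := action("errno(EPERM)")
    addRule(def, "keyctl", "errno(EPERM)") // redundant, skipped
    addRule(def, "openat", "allow")        // added normally
}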
27 vendor/k8s.io/apimachinery/third_party/forked/golang/LICENSE generated vendored Normal file
@@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
22 vendor/k8s.io/apimachinery/third_party/forked/golang/PATENTS generated vendored Normal file
@@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)

"This implementation" means the copyrightable works distributed by
Google as part of the Go project.

Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
6 vendor/k8s.io/client-go/pkg/version/base.go generated vendored
@@ -3,8 +3,8 @@ package version
var (
gitMajor = "1"
gitMinor = "22"
gitVersion = "v1.22.1-k3s1"
gitCommit = "1d402161376010a3f98defedab2be2941b04abb1"
gitVersion = "v1.22.2-k3s1"
gitCommit = "c4ebe1924cba7aeae2a97729df3e0ac4a2ce5459"
gitTreeState = "clean"
buildDate = "2021-08-20T09:21:43Z"
buildDate = "2021-09-16T19:43:43Z"
)
27 vendor/k8s.io/client-go/third_party/forked/golang/LICENSE generated vendored Normal file
@@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
22 vendor/k8s.io/client-go/third_party/forked/golang/PATENTS generated vendored Normal file
@@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)

"This implementation" means the copyrightable works distributed by
Google as part of the Go project.

Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
3 vendor/k8s.io/client-go/util/jsonpath/jsonpath.go generated vendored
@@ -132,9 +132,6 @@ func (j *JSONPath) FindResults(data interface{}) ([][]reflect.Value, error) {
}
continue
}
if len(results) == 0 {
break
}
fullResult = append(fullResult, results)
}
return fullResult, nil
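The three deleted lines made FindResults stop as soon as one expression produced no values; with them gone, later expressions in the same template still yield results. A hedged usage sketch against client-go's jsonpath package (the data is invented for illustration):

package main

import (
    "os"

    "k8s.io/client-go/util/jsonpath"
)

func main() {
    // The first user has no "nick" key. With the early break removed, the
    // range keeps producing output for expressions that do match.
    data := map[string]interface{}{
        "users": []interface{}{
            map[string]interface{}{"name": "a"},
            map[string]interface{}{"name": "b", "nick": "bee"},
        },
    }
    jp := jsonpath.New("demo").AllowMissingKeys(true)
    if err := jp.Parse(`{range .users[*]}{.name} {.nick}{"\n"}{end}`); err != nil {
        panic(err)
    }
    if err := jp.Execute(os.Stdout, data); err != nil {
        panic(err)
    }
}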
2 vendor/k8s.io/cloud-provider/go.mod generated vendored
@@ -16,7 +16,7 @@ require (
k8s.io/component-base v0.0.0
k8s.io/controller-manager v0.0.0
k8s.io/klog/v2 v2.9.0
k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a
)

replace (
4 vendor/k8s.io/cloud-provider/go.sum generated vendored
@@ -782,8 +782,8 @@ k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM=
k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e h1:KLHHjkdQFomZy8+06csTWZ0m1343QqxZhR2LJ1OxCYM=
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9 h1:imL9YgXQ9p7xmPzHFm/vVd/cF78jad+n4wK1ABwYtMM=
k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a h1:8dYfu/Fc9Gz2rNJKB9IQRGgQOh2clmRzNIPPY1xLY5g=
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
6 vendor/k8s.io/component-base/version/base.go generated vendored
@@ -3,8 +3,8 @@ package version
var (
gitMajor = "1"
gitMinor = "22"
gitVersion = "v1.22.1-k3s1"
gitCommit = "1d402161376010a3f98defedab2be2941b04abb1"
gitVersion = "v1.22.2-k3s1"
gitCommit = "c4ebe1924cba7aeae2a97729df3e0ac4a2ce5459"
gitTreeState = "clean"
buildDate = "2021-08-20T09:21:43Z"
buildDate = "2021-09-16T19:43:43Z"
)
106 vendor/k8s.io/kubernetes/pkg/controller/job/job_controller.go generated vendored
@@ -61,7 +61,8 @@ const (
// maxUncountedPods is the maximum size the slices in
// .status.uncountedTerminatedPods should have to keep their representation
// roughly below 20 KB.
maxUncountedPods = 500
maxUncountedPods = 500
maxPodCreateDeletePerSync = 500
)

// controllerKind contains the schema.GroupVersionKind for this controller type.
@@ -71,8 +72,7 @@ var (
// DefaultJobBackOff is the default backoff period, exported for the e2e test
DefaultJobBackOff = 10 * time.Second
// MaxJobBackOff is the max backoff period, exported for the e2e test
MaxJobBackOff = 360 * time.Second
maxPodCreateDeletePerSync = 500
MaxJobBackOff = 360 * time.Second
)

// Controller ensures that all Job objects have corresponding pods to
@@ -888,9 +888,18 @@ func (jm *Controller) trackJobStatusAndRemoveFinalizers(job *batch.Job, pods []*
uncountedStatus := job.Status.UncountedTerminatedPods
var newSucceededIndexes []int
if isIndexed {
// Sort to introduce completed Indexes First.
// Sort to introduce completed Indexes in order.
sort.Sort(byCompletionIndex(pods))
}
uidsWithFinalizer := make(sets.String, len(pods))
for _, p := range pods {
if hasJobTrackingFinalizer(p) {
uidsWithFinalizer.Insert(string(p.UID))
}
}
if cleanUncountedPodsWithoutFinalizers(&job.Status, uidsWithFinalizer) {
needsFlush = true
}
for _, pod := range pods {
if !hasJobTrackingFinalizer(pod) {
continue
@@ -924,14 +933,14 @@
uncountedStatus.Failed = append(uncountedStatus.Failed, pod.UID)
}
}
if len(uncountedStatus.Succeeded)+len(uncountedStatus.Failed) >= maxUncountedPods {
if len(newSucceededIndexes)+len(uncountedStatus.Succeeded)+len(uncountedStatus.Failed) >= maxUncountedPods {
if len(newSucceededIndexes) > 0 {
succeededIndexes = succeededIndexes.withOrderedIndexes(newSucceededIndexes)
job.Status.Succeeded = int32(succeededIndexes.total())
job.Status.CompletedIndexes = succeededIndexes.String()
}
var err error
if needsFlush, err = jm.flushUncountedAndRemoveFinalizers(job, podsToRemoveFinalizer, needsFlush); err != nil {
if needsFlush, err = jm.flushUncountedAndRemoveFinalizers(job, podsToRemoveFinalizer, uidsWithFinalizer, needsFlush); err != nil {
return err
}
podsToRemoveFinalizer = nil
@@ -944,7 +953,7 @@
job.Status.CompletedIndexes = succeededIndexes.String()
}
var err error
if needsFlush, err = jm.flushUncountedAndRemoveFinalizers(job, podsToRemoveFinalizer, needsFlush); err != nil {
if needsFlush, err = jm.flushUncountedAndRemoveFinalizers(job, podsToRemoveFinalizer, uidsWithFinalizer, needsFlush); err != nil {
return err
}
if jm.enactJobFinished(job, finishedCond) {
@@ -967,44 +976,29 @@
// 4. (if not all removals succeeded) flush Job status again.
// Returns whether there are pending changes in the Job status that need to be
// flushed in subsequent calls.
func (jm *Controller) flushUncountedAndRemoveFinalizers(job *batch.Job, podsToRemoveFinalizer []*v1.Pod, needsFlush bool) (bool, error) {
func (jm *Controller) flushUncountedAndRemoveFinalizers(job *batch.Job, podsToRemoveFinalizer []*v1.Pod, uidsWithFinalizer sets.String, needsFlush bool) (bool, error) {
if needsFlush {
if err := jm.updateStatusHandler(job); err != nil {
return needsFlush, fmt.Errorf("adding uncounted pods to status: %w", err)
}
needsFlush = false
}
var failedToRm []*v1.Pod
var rmErr error
if len(podsToRemoveFinalizer) > 0 {
failedToRm, rmErr = jm.removeTrackingFinalizerFromPods(podsToRemoveFinalizer)
var rmSucceded []bool
rmSucceded, rmErr = jm.removeTrackingFinalizerFromPods(podsToRemoveFinalizer)
for i, p := range podsToRemoveFinalizer {
if rmSucceded[i] {
uidsWithFinalizer.Delete(string(p.UID))
}
}
}
uncountedStatus := job.Status.UncountedTerminatedPods
if rmErr == nil {
needsFlush = len(uncountedStatus.Succeeded) > 0 || len(uncountedStatus.Failed) > 0
job.Status.Succeeded += int32(len(uncountedStatus.Succeeded))
uncountedStatus.Succeeded = nil
job.Status.Failed += int32(len(uncountedStatus.Failed))
uncountedStatus.Failed = nil
return needsFlush, nil
}
uidsWithFinalizer := make(sets.String, len(failedToRm))
for _, p := range failedToRm {
uidsWithFinalizer.Insert(string(p.UID))
}
newUncounted := uncountedWithFailedFinalizerRemovals(uncountedStatus.Succeeded, uidsWithFinalizer)
if len(newUncounted) != len(uncountedStatus.Succeeded) {
// Failed to remove some finalizers. Attempt to update the status with the
// partial progress.
if cleanUncountedPodsWithoutFinalizers(&job.Status, uidsWithFinalizer) {
needsFlush = true
job.Status.Succeeded += int32(len(uncountedStatus.Succeeded) - len(newUncounted))
uncountedStatus.Succeeded = newUncounted
}
newUncounted = uncountedWithFailedFinalizerRemovals(uncountedStatus.Failed, uidsWithFinalizer)
|
||||
if len(newUncounted) != len(uncountedStatus.Failed) {
|
||||
needsFlush = true
|
||||
job.Status.Failed += int32(len(uncountedStatus.Failed) - len(newUncounted))
|
||||
uncountedStatus.Failed = newUncounted
|
||||
}
|
||||
if needsFlush {
|
||||
if rmErr != nil && needsFlush {
|
||||
if err := jm.updateStatusHandler(job); err != nil {
|
||||
return needsFlush, fmt.Errorf("removing uncounted pods from status: %w", err)
|
||||
}
|
||||
@ -1012,14 +1006,35 @@ func (jm *Controller) flushUncountedAndRemoveFinalizers(job *batch.Job, podsToRe
|
||||
return needsFlush, rmErr
|
||||
}
|
||||
|
||||
// cleanUncountedPodsWithoutFinalizers removes the Pod UIDs from
|
||||
// .status.uncountedTerminatedPods for which the finalizer was successfully
|
||||
// removed and increments the corresponding status counters.
|
||||
// Returns whether there was any status change.
|
||||
func cleanUncountedPodsWithoutFinalizers(status *batch.JobStatus, uidsWithFinalizer sets.String) bool {
|
||||
updated := false
|
||||
uncountedStatus := status.UncountedTerminatedPods
|
||||
newUncounted := filterInUncountedUIDs(uncountedStatus.Succeeded, uidsWithFinalizer)
|
||||
if len(newUncounted) != len(uncountedStatus.Succeeded) {
|
||||
updated = true
|
||||
status.Succeeded += int32(len(uncountedStatus.Succeeded) - len(newUncounted))
|
||||
uncountedStatus.Succeeded = newUncounted
|
||||
}
|
||||
newUncounted = filterInUncountedUIDs(uncountedStatus.Failed, uidsWithFinalizer)
|
||||
if len(newUncounted) != len(uncountedStatus.Failed) {
|
||||
updated = true
|
||||
status.Failed += int32(len(uncountedStatus.Failed) - len(newUncounted))
|
||||
uncountedStatus.Failed = newUncounted
|
||||
}
|
||||
return updated
|
||||
}
|
||||
|
||||
// removeTrackingFinalizerFromPods removes tracking finalizers from Pods and
|
||||
// returns the pod for which the operation failed (if the pod was deleted when
|
||||
// this function was called, it's considered as the finalizer was removed
|
||||
// successfully).
|
||||
func (jm *Controller) removeTrackingFinalizerFromPods(pods []*v1.Pod) ([]*v1.Pod, error) {
|
||||
// returns an array of booleans where the i-th value is true if the finalizer
|
||||
// of the i-th Pod was successfully removed (if the pod was deleted when this
|
||||
// function was called, it's considered as the finalizer was removed successfully).
|
||||
func (jm *Controller) removeTrackingFinalizerFromPods(pods []*v1.Pod) ([]bool, error) {
|
||||
errCh := make(chan error, len(pods))
|
||||
var failed []*v1.Pod
|
||||
var lock sync.Mutex
|
||||
succeeded := make([]bool, len(pods))
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(len(pods))
|
||||
for i := range pods {
|
||||
@ -1030,16 +1045,15 @@ func (jm *Controller) removeTrackingFinalizerFromPods(pods []*v1.Pod) ([]*v1.Pod
|
||||
if err := jm.podControl.PatchPod(pod.Namespace, pod.Name, patch); err != nil && !apierrors.IsNotFound(err) {
|
||||
errCh <- err
|
||||
utilruntime.HandleError(err)
|
||||
lock.Lock()
|
||||
failed = append(failed, pod)
|
||||
lock.Unlock()
|
||||
return
|
||||
}
|
||||
succeeded[i] = true
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
wg.Wait()
|
||||
return failed, errorFromChannel(errCh)
|
||||
|
||||
return succeeded, errorFromChannel(errCh)
|
||||
}
|
||||
|
||||
// enactJobFinished adds the Complete or Failed condition and records events.
|
||||
@ -1072,10 +1086,10 @@ func (jm *Controller) enactJobFinished(job *batch.Job, finishedCond *batch.JobCo
|
||||
return true
|
||||
}
|
||||
|
||||
func uncountedWithFailedFinalizerRemovals(uncounted []types.UID, uidsWithFinalizer sets.String) []types.UID {
|
||||
func filterInUncountedUIDs(uncounted []types.UID, include sets.String) []types.UID {
|
||||
var newUncounted []types.UID
|
||||
for _, uid := range uncounted {
|
||||
if uidsWithFinalizer.Has(string(uid)) {
|
||||
if include.Has(string(uid)) {
|
||||
newUncounted = append(newUncounted, uid)
|
||||
}
|
||||
}
|
||||
|
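The core of the job controller change is that removeTrackingFinalizerFromPods now returns a []bool aligned with its input instead of accumulating failed pods behind a mutex, and the caller uses that to prune uidsWithFinalizer. A minimal, self-contained sketch of that concurrency pattern; patchAll and the pod names are illustrative, not the controller's API:

package main

import (
	"fmt"
	"sync"
)

// patchAll runs fn on every item concurrently and returns an index-aligned
// success slice. Each goroutine writes only its own element, so the result
// needs no mutex, unlike the old append-to-a-shared-slice approach.
func patchAll(items []string, fn func(string) error) []bool {
	succeeded := make([]bool, len(items))
	var wg sync.WaitGroup
	wg.Add(len(items))
	for i := range items {
		go func(i int) {
			defer wg.Done()
			if err := fn(items[i]); err == nil {
				succeeded[i] = true
			}
		}(i)
	}
	wg.Wait()
	return succeeded
}

func main() {
	ok := patchAll([]string{"pod-a", "pod-b"}, func(name string) error {
		if name == "pod-b" {
			return fmt.Errorf("patch failed for %s", name)
		}
		return nil
	})
	fmt.Println(ok) // [true false]
}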
3 vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_linux.go generated vendored
@@ -389,7 +389,8 @@ func getSupportedUnifiedControllers() sets.String {
 
 func (m *cgroupManagerImpl) toResources(resourceConfig *ResourceConfig) *libcontainerconfigs.Resources {
 	resources := &libcontainerconfigs.Resources{
-		SkipDevices: true,
+		SkipDevices:     true,
+		SkipFreezeOnSet: true,
 	}
 	if resourceConfig == nil {
 		return resources
17 vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go generated vendored
@@ -95,6 +95,10 @@ func (kl *Kubelet) listPodsFromDisk() ([]types.UID, error) {
 // GetActivePods returns pods that may have a running container (a
 // terminated pod is one that is known to have no running containers and
 // will not get any more).
+//
+// TODO: This method must include pods that have been force deleted from
+// the config source (and thus removed from the pod manager) but are still
+// terminating.
 func (kl *Kubelet) GetActivePods() []*v1.Pod {
 	allPods := kl.podManager.GetPods()
 	activePods := kl.filterOutTerminatedPods(allPods)
@@ -964,12 +968,17 @@ func (kl *Kubelet) podResourcesAreReclaimed(pod *v1.Pod) bool {
 	return kl.PodResourcesAreReclaimed(pod, status)
 }
 
-// filterOutTerminatedPods returns the pods that could still have running
-// containers
+// filterOutTerminatedPods returns pods that are not in a terminal phase
+// or are known to be fully terminated. This method should only be used
+// when the set of pods being filtered is upstream of the pod worker, i.e.
+// the pods the pod manager is aware of.
 func (kl *Kubelet) filterOutTerminatedPods(pods []*v1.Pod) []*v1.Pod {
-	var filteredPods []*v1.Pod
+	filteredPods := make([]*v1.Pod, 0, len(pods))
 	for _, p := range pods {
-		if !kl.podWorkers.CouldHaveRunningContainers(p.UID) {
+		if kl.podWorkers.IsPodKnownTerminated(p.UID) {
+			continue
+		}
+		if p.Status.Phase == v1.PodSucceeded || p.Status.Phase == v1.PodFailed {
 			continue
 		}
 		filteredPods = append(filteredPods, p)
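The kubelet filter now drops a pod only on positive evidence of termination: either the pod worker has seen it terminate or its status phase is already terminal; it also preallocates the result slice. A hedged sketch of that filtering shape, with PodLike and isKnownTerminated as stand-ins for the kubelet types:

package main

import "fmt"

type PodLike struct {
	UID   string
	Phase string // "Pending", "Running", "Succeeded", "Failed", ...
}

// filterActive keeps pods that may still have running containers; the
// inversion the diff makes is that pods unknown to the worker are now kept.
func filterActive(pods []PodLike, isKnownTerminated func(uid string) bool) []PodLike {
	filtered := make([]PodLike, 0, len(pods)) // preallocated, as in the diff
	for _, p := range pods {
		if isKnownTerminated(p.UID) {
			continue
		}
		if p.Phase == "Succeeded" || p.Phase == "Failed" {
			continue
		}
		filtered = append(filtered, p)
	}
	return filtered
}

func main() {
	pods := []PodLike{{UID: "a", Phase: "Running"}, {UID: "b", Phase: "Failed"}}
	known := func(uid string) bool { return false }
	fmt.Println(len(filterActive(pods, known))) // 1
}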
18 vendor/k8s.io/kubernetes/pkg/kubelet/pod_workers.go generated vendored
@@ -130,6 +130,14 @@ type PodWorkers interface {
 	// true.
 	SyncKnownPods(desiredPods []*v1.Pod) map[types.UID]PodWorkType
 
+	// IsPodKnownTerminated returns true if the provided pod UID is known by the pod
+	// worker to be terminated. If the pod has been force deleted and the pod worker
+	// has completed termination this method will return false, so this method should
+	// only be used to filter out pods from the desired set such as in admission.
+	//
+	// Intended for use by the kubelet config loops, but not subsystems, which should
+	// use ShouldPod*().
+	IsPodKnownTerminated(uid types.UID) bool
 	// CouldHaveRunningContainers returns true before the pod workers have synced,
 	// once the pod workers see the pod (syncPod could be called), and returns false
 	// after the pod has been terminated (running containers guaranteed stopped).
@@ -394,6 +402,16 @@ func newPodWorkers(
 	}
 }
 
+func (p *podWorkers) IsPodKnownTerminated(uid types.UID) bool {
+	p.podLock.Lock()
+	defer p.podLock.Unlock()
+	if status, ok := p.podSyncStatuses[uid]; ok {
+		return status.IsTerminated()
+	}
+	// if the pod is not known, we return false (pod worker is not aware of it)
+	return false
+}
+
 func (p *podWorkers) CouldHaveRunningContainers(uid types.UID) bool {
 	p.podLock.Lock()
 	defer p.podLock.Unlock()
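The new IsPodKnownTerminated accessor is a lock-guarded map lookup that deliberately answers false for pods the worker has never seen, which is why its doc comment restricts it to filtering. A sketch of the same shape; workers and its fields are illustrative stand-ins for podWorkers:

package main

import (
	"fmt"
	"sync"
)

type workers struct {
	mu       sync.Mutex
	statuses map[string]bool // uid -> known terminated
}

// IsKnownTerminated mirrors the accessor above: true only for pods the worker
// has tracked and marked terminated; an unknown UID reports false, so callers
// must not use it to decide whether a pod is safe to act on.
func (w *workers) IsKnownTerminated(uid string) bool {
	w.mu.Lock()
	defer w.mu.Unlock()
	if terminated, ok := w.statuses[uid]; ok {
		return terminated
	}
	return false
}

func main() {
	w := &workers{statuses: map[string]bool{"a": true}}
	fmt.Println(w.IsKnownTerminated("a"), w.IsKnownTerminated("b")) // true false
}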
11 vendor/k8s.io/kubernetes/pkg/kubelet/util/manager/watch_based_manager.go generated vendored
@@ -95,7 +95,11 @@ func (i *objectCacheItem) setImmutable() {
 func (i *objectCacheItem) stopIfIdle(now time.Time, maxIdleTime time.Duration) bool {
 	i.lock.Lock()
 	defer i.lock.Unlock()
-	if !i.stopped && now.After(i.lastAccessTime.Add(maxIdleTime)) {
+	// Ensure that we don't try to stop not yet initialized reflector.
+	// In case of overloaded kube-apiserver, if the list request is
+	// already being processed, all the work would lost and would have
+	// to be retried.
+	if !i.stopped && i.store.hasSynced() && now.After(i.lastAccessTime.Add(maxIdleTime)) {
 		return i.stopThreadUnsafe()
 	}
 	return false
@@ -287,11 +291,14 @@ func (c *objectCache) Get(namespace, name string) (runtime.Object, error) {
 	if !exists {
 		return nil, fmt.Errorf("object %q/%q not registered", namespace, name)
 	}
+	// Record last access time independently if it succeeded or not.
+	// This protects from premature (racy) reflector closure.
+	item.setLastAccessTime(c.clock.Now())
+
 	item.restartReflectorIfNeeded()
 	if err := wait.PollImmediate(10*time.Millisecond, time.Second, item.hasSynced); err != nil {
 		return nil, fmt.Errorf("failed to sync %s cache: %v", c.groupResource.String(), err)
 	}
-	item.setLastAccessTime(c.clock.Now())
 	obj, exists, err := item.store.GetByKey(c.key(namespace, name))
 	if err != nil {
 		return nil, err
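Both watch_based_manager fixes guard against racy reflector shutdown: the last-access time is now bumped before waiting for sync, and an unsynced reflector is never considered idle. A sketch of the idle-stop guard under those assumptions; cacheItem and its fields stand in for the objectCacheItem internals:

package main

import (
	"fmt"
	"sync"
	"time"
)

type cacheItem struct {
	mu         sync.Mutex
	stopped    bool
	synced     bool // has the reflector finished its initial LIST?
	lastAccess time.Time
}

// stopIfIdle refuses to stop a reflector that has not synced yet: stopping
// mid-LIST against an overloaded apiserver would discard the in-flight work
// and force a retry, which is exactly what the diff above avoids.
func (i *cacheItem) stopIfIdle(now time.Time, maxIdle time.Duration) bool {
	i.mu.Lock()
	defer i.mu.Unlock()
	if !i.stopped && i.synced && now.After(i.lastAccess.Add(maxIdle)) {
		i.stopped = true
		return true
	}
	return false
}

func main() {
	it := &cacheItem{synced: false, lastAccess: time.Now().Add(-time.Hour)}
	fmt.Println(it.stopIfIdle(time.Now(), time.Minute)) // false: not synced yet
	it.synced = true
	fmt.Println(it.stopIfIdle(time.Now(), time.Minute)) // true
}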
22 vendor/k8s.io/kubernetes/pkg/registry/core/service/strategy.go generated vendored
@@ -318,6 +318,20 @@ func patchAllocatedValues(newSvc, oldSvc *api.Service) {
 	}
 
 	if needsNodePort(oldSvc) && needsNodePort(newSvc) {
+		nodePortsUsed := func(svc *api.Service) sets.Int32 {
+			used := sets.NewInt32()
+			for _, p := range svc.Spec.Ports {
+				if p.NodePort != 0 {
+					used.Insert(p.NodePort)
+				}
+			}
+			return used
+		}
+
+		// Build a set of all the ports in oldSvc that are also in newSvc. We know
+		// we can't patch these values.
+		used := nodePortsUsed(oldSvc).Intersection(nodePortsUsed(newSvc))
+
 		// Map NodePorts by name. The user may have changed other properties
 		// of the port, but we won't see that here.
 		np := map[string]int32{}
@@ -325,10 +339,16 @@ func patchAllocatedValues(newSvc, oldSvc *api.Service) {
 			p := &oldSvc.Spec.Ports[i]
 			np[p.Name] = p.NodePort
 		}
+
+		// If newSvc is missing values, try to patch them in when we know them and
+		// they haven't been used for another port.
 		for i := range newSvc.Spec.Ports {
 			p := &newSvc.Spec.Ports[i]
 			if p.NodePort == 0 {
-				p.NodePort = np[p.Name]
+				oldVal := np[p.Name]
+				if !used.Has(oldVal) {
+					p.NodePort = oldVal
+				}
 			}
 		}
 	}
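The strategy.go change stops the apiserver from patching an old NodePort back onto a port when that value is still held by another port that survives the update. A compact sketch of the same guard; Port and patchNodePorts are illustrative stand-ins for the api.Service types:

package main

import "fmt"

type Port struct {
	Name     string
	NodePort int32
}

// patchNodePorts restores an old NodePort onto a new port only when the value
// is not present in both specs, mirroring the intersection check in the diff.
func patchNodePorts(newPorts, oldPorts []Port) {
	inNew := map[int32]bool{}
	for _, p := range newPorts {
		if p.NodePort != 0 {
			inNew[p.NodePort] = true
		}
	}
	used := map[int32]bool{} // ports present in both old and new specs
	byName := map[string]int32{}
	for _, p := range oldPorts {
		byName[p.Name] = p.NodePort
		if inNew[p.NodePort] {
			used[p.NodePort] = true
		}
	}
	for i := range newPorts {
		p := &newPorts[i]
		if p.NodePort == 0 {
			if old := byName[p.Name]; !used[old] {
				p.NodePort = old
			}
		}
	}
}

func main() {
	newPorts := []Port{{Name: "http"}, {Name: "https", NodePort: 30080}}
	oldPorts := []Port{{Name: "http", NodePort: 30080}, {Name: "https", NodePort: 30443}}
	patchNodePorts(newPorts, oldPorts)
	fmt.Println(newPorts) // http stays 0: 30080 is now taken by https
}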
2 vendor/k8s.io/kubernetes/pkg/scheduler/scheduler.go generated vendored
@@ -539,7 +539,7 @@ func (sched *Scheduler) scheduleOne(ctx context.Context) {
 		// Run PostFilter plugins to try to make the pod schedulable in a future scheduling cycle.
 		result, status := fwk.RunPostFilterPlugins(ctx, state, pod, fitError.Diagnosis.NodeToStatusMap)
 		if status.Code() == framework.Error {
-			klog.ErrorS(nil, "Status after running PostFilter plugins for pod", klog.KObj(pod), "status", status)
+			klog.ErrorS(nil, "Status after running PostFilter plugins for pod", "pod", klog.KObj(pod), "status", status)
 		} else {
 			klog.V(5).InfoS("Status after running PostFilter plugins for pod", "pod", klog.KObj(pod), "status", status)
 		}
2 vendor/k8s.io/kubernetes/third_party/forked/golang/LICENSE generated vendored
@@ -1,4 +1,4 @@
-Copyright (c) 2012 The Go Authors. All rights reserved.
+Copyright (c) 2009 The Go Authors. All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are
58 vendor/k8s.io/legacy-cloud-providers/aws/aws.go generated vendored
@@ -35,7 +35,6 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/aws/credentials"
-	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
 	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
 	"github.com/aws/aws-sdk-go/aws/ec2metadata"
 	"github.com/aws/aws-sdk-go/aws/endpoints"
@@ -821,8 +820,11 @@ func (p *awsSDKProvider) Compute(regionName string) (EC2, error) {
 	}
 	awsConfig = awsConfig.WithCredentialsChainVerboseErrors(true).
 		WithEndpointResolver(p.cfg.getResolver())
+	sess, err := session.NewSessionWithOptions(session.Options{
+		Config:            *awsConfig,
+		SharedConfigState: session.SharedConfigEnable,
+	})
 
-	sess, err := session.NewSession(awsConfig)
 	if err != nil {
 		return nil, fmt.Errorf("unable to initialize AWS session: %v", err)
 	}
@@ -843,8 +845,10 @@ func (p *awsSDKProvider) LoadBalancing(regionName string) (ELB, error) {
 	}
 	awsConfig = awsConfig.WithCredentialsChainVerboseErrors(true).
 		WithEndpointResolver(p.cfg.getResolver())
-
-	sess, err := session.NewSession(awsConfig)
+	sess, err := session.NewSessionWithOptions(session.Options{
+		Config:            *awsConfig,
+		SharedConfigState: session.SharedConfigEnable,
+	})
 	if err != nil {
 		return nil, fmt.Errorf("unable to initialize AWS session: %v", err)
 	}
@@ -861,8 +865,10 @@ func (p *awsSDKProvider) LoadBalancingV2(regionName string) (ELBV2, error) {
 	}
 	awsConfig = awsConfig.WithCredentialsChainVerboseErrors(true).
 		WithEndpointResolver(p.cfg.getResolver())
-
-	sess, err := session.NewSession(awsConfig)
+	sess, err := session.NewSessionWithOptions(session.Options{
+		Config:            *awsConfig,
+		SharedConfigState: session.SharedConfigEnable,
+	})
 	if err != nil {
 		return nil, fmt.Errorf("unable to initialize AWS session: %v", err)
 	}
@@ -880,8 +886,10 @@ func (p *awsSDKProvider) Autoscaling(regionName string) (ASG, error) {
 	}
 	awsConfig = awsConfig.WithCredentialsChainVerboseErrors(true).
 		WithEndpointResolver(p.cfg.getResolver())
-
-	sess, err := session.NewSession(awsConfig)
+	sess, err := session.NewSessionWithOptions(session.Options{
+		Config:            *awsConfig,
+		SharedConfigState: session.SharedConfigEnable,
+	})
 	if err != nil {
 		return nil, fmt.Errorf("unable to initialize AWS session: %v", err)
 	}
@@ -911,8 +919,10 @@ func (p *awsSDKProvider) KeyManagement(regionName string) (KMS, error) {
 	}
 	awsConfig = awsConfig.WithCredentialsChainVerboseErrors(true).
 		WithEndpointResolver(p.cfg.getResolver())
-
-	sess, err := session.NewSession(awsConfig)
+	sess, err := session.NewSessionWithOptions(session.Options{
+		Config:            *awsConfig,
+		SharedConfigState: session.SharedConfigEnable,
+	})
 	if err != nil {
 		return nil, fmt.Errorf("unable to initialize AWS session: %v", err)
 	}
@@ -1170,30 +1180,28 @@ func init() {
 			return nil, fmt.Errorf("unable to validate custom endpoint overrides: %v", err)
 		}
 
-		sess, err := session.NewSession(&aws.Config{})
+		sess, err := session.NewSessionWithOptions(session.Options{
+			Config:            aws.Config{},
+			SharedConfigState: session.SharedConfigEnable,
+		})
 		if err != nil {
 			return nil, fmt.Errorf("unable to initialize AWS session: %v", err)
 		}
 
-		var provider credentials.Provider
-		if cfg.Global.RoleARN == "" {
-			provider = &ec2rolecreds.EC2RoleProvider{
-				Client: ec2metadata.New(sess),
-			}
-		} else {
+		var creds *credentials.Credentials
+		if cfg.Global.RoleARN != "" {
			klog.Infof("Using AWS assumed role %v", cfg.Global.RoleARN)
-			provider = &stscreds.AssumeRoleProvider{
+			provider := &stscreds.AssumeRoleProvider{
 				Client:  sts.New(sess),
 				RoleARN: cfg.Global.RoleARN,
 			}
-		}
 
-		creds := credentials.NewChainCredentials(
-			[]credentials.Provider{
-				&credentials.EnvProvider{},
-				provider,
-				&credentials.SharedCredentialsProvider{},
-			})
+			creds = credentials.NewChainCredentials(
+				[]credentials.Provider{
+					&credentials.EnvProvider{},
+					provider,
+				})
+		}
 
 		aws := newAWSSDKProvider(creds, cfg)
 		return newAWSCloud(*cfg, aws)
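Every AWS session in this file switches from session.NewSession to session.NewSessionWithOptions with SharedConfigState enabled, which makes the SDK honor ~/.aws/config (region, assume-role profiles) alongside the credentials file. A minimal standalone example of that call, using the real aws-sdk-go API shown in the diff:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// SharedConfigEnable is the point of the change: without it the SDK
	// ignores ~/.aws/config unless AWS_SDK_LOAD_CONFIG is set in the env.
	sess, err := session.NewSessionWithOptions(session.Options{
		Config:            aws.Config{},
		SharedConfigState: session.SharedConfigEnable,
	})
	if err != nil {
		log.Fatalf("unable to initialize AWS session: %v", err)
	}
	fmt.Println("region from shared config:", aws.StringValue(sess.Config.Region))
}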
19 vendor/k8s.io/legacy-cloud-providers/azure/azure_instances.go generated vendored
@@ -20,6 +20,7 @@ package azure
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"os"
 	"strings"
@@ -29,6 +30,8 @@ import (
 	cloudprovider "k8s.io/cloud-provider"
 	"k8s.io/klog/v2"
 	azcache "k8s.io/legacy-cloud-providers/azure/cache"
+
+	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
 )
 
 const (
@@ -233,10 +236,22 @@ func (az *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID st
 
 		return false, err
 	}
-	klog.V(5).Infof("InstanceShutdownByProviderID gets power status %q for node %q", powerStatus, nodeName)
+	klog.V(3).Infof("InstanceShutdownByProviderID gets power status %q for node %q", powerStatus, nodeName)
+
+	provisioningState, err := az.VMSet.GetProvisioningStateByNodeName(string(nodeName))
+	if err != nil {
+		// Returns false, so the controller manager will continue to check InstanceExistsByProviderID().
+		if errors.Is(err, cloudprovider.InstanceNotFound) {
+			return false, nil
+		}
+
+		return false, err
+	}
+	klog.V(3).Infof("InstanceShutdownByProviderID gets provisioning state %q for node %q", provisioningState, nodeName)
 
 	status := strings.ToLower(powerStatus)
-	return status == vmPowerStateStopped || status == vmPowerStateDeallocated || status == vmPowerStateDeallocating, nil
+	provisioningSucceeded := strings.EqualFold(strings.ToLower(provisioningState), strings.ToLower(string(compute.ProvisioningStateSucceeded)))
+	return provisioningSucceeded && (status == vmPowerStateStopped || status == vmPowerStateDeallocated || status == vmPowerStateDeallocating), nil
 }
 
 func (az *Cloud) isCurrentInstance(name types.NodeName, metadataVMName string) (bool, error) {
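The Azure change tightens shutdown detection: a node counts as shut down only when its provisioning state is Succeeded and its power state is one of the stopped variants, so VMs mid-update are not treated as powered off. The combined predicate reduced to a sketch; the state strings are the ones compared in the diff:

package main

import (
	"fmt"
	"strings"
)

// isShutdown mirrors the return expression above: both conditions must hold.
func isShutdown(provisioningState, powerStatus string) bool {
	provisioningSucceeded := strings.EqualFold(provisioningState, "Succeeded")
	status := strings.ToLower(powerStatus)
	return provisioningSucceeded &&
		(status == "stopped" || status == "deallocated" || status == "deallocating")
}

func main() {
	fmt.Println(isShutdown("Succeeded", "deallocated")) // true
	fmt.Println(isShutdown("Updating", "deallocated"))  // false: still provisioning
}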
18 vendor/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go generated vendored
@@ -1858,18 +1858,18 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
 			sharedRuleName := az.getSecurityRuleName(service, port, sourceAddressPrefix)
 			sharedIndex, sharedRule, sharedRuleFound := findSecurityRuleByName(updatedRules, sharedRuleName)
 			if !sharedRuleFound {
-				klog.V(4).Infof("Expected to find shared rule %s for service %s being deleted, but did not", sharedRuleName, service.Name)
-				return nil, fmt.Errorf("expected to find shared rule %s for service %s being deleted, but did not", sharedRuleName, service.Name)
+				klog.V(4).Infof("Didn't find shared rule %s for service %s", sharedRuleName, service.Name)
+				continue
 			}
 			if sharedRule.DestinationAddressPrefixes == nil {
-				klog.V(4).Infof("Expected to have array of destinations in shared rule for service %s being deleted, but did not", service.Name)
-				return nil, fmt.Errorf("expected to have array of destinations in shared rule for service %s being deleted, but did not", service.Name)
+				klog.V(4).Infof("Didn't find DestinationAddressPrefixes in shared rule for service %s", service.Name)
+				continue
 			}
 			existingPrefixes := *sharedRule.DestinationAddressPrefixes
 			addressIndex, found := findIndex(existingPrefixes, destinationIPAddress)
 			if !found {
-				klog.V(4).Infof("Expected to find destination address %s in shared rule %s for service %s being deleted, but did not", destinationIPAddress, sharedRuleName, service.Name)
-				return nil, fmt.Errorf("expected to find destination address %s in shared rule %s for service %s being deleted, but did not", destinationIPAddress, sharedRuleName, service.Name)
+				klog.V(4).Infof("Didn't find destination address %v in shared rule %s for service %s", destinationIPAddress, sharedRuleName, service.Name)
+				continue
 			}
 			if len(existingPrefixes) == 1 {
 				updatedRules = append(updatedRules[:sharedIndex], updatedRules[sharedIndex+1:]...)
@@ -2426,7 +2426,7 @@ func findSecurityRule(rules []network.SecurityRule, rule network.SecurityRule) b
 		if !strings.EqualFold(to.String(existingRule.Name), to.String(rule.Name)) {
 			continue
 		}
-		if existingRule.Protocol != rule.Protocol {
+		if !strings.EqualFold(string(existingRule.Protocol), string(rule.Protocol)) {
 			continue
 		}
 		if !strings.EqualFold(to.String(existingRule.SourcePortRange), to.String(rule.SourcePortRange)) {
@@ -2443,10 +2443,10 @@ func findSecurityRule(rules []network.SecurityRule, rule network.SecurityRule) b
 				continue
 			}
 		}
-		if existingRule.Access != rule.Access {
+		if !strings.EqualFold(string(existingRule.Access), string(rule.Access)) {
 			continue
 		}
-		if existingRule.Direction != rule.Direction {
+		if !strings.EqualFold(string(existingRule.Direction), string(rule.Direction)) {
 			continue
 		}
 		return true
14 vendor/k8s.io/legacy-cloud-providers/azure/azure_standard.go generated vendored
@@ -504,6 +504,20 @@ func (as *availabilitySet) GetPowerStatusByNodeName(name string) (powerState str
 	return vmPowerStateStopped, nil
 }
 
+// GetProvisioningStateByNodeName returns the provisioningState for the specified node.
+func (as *availabilitySet) GetProvisioningStateByNodeName(name string) (provisioningState string, err error) {
+	vm, err := as.getVirtualMachine(types.NodeName(name), azcache.CacheReadTypeDefault)
+	if err != nil {
+		return provisioningState, err
+	}
+
+	if vm.VirtualMachineProperties == nil || vm.VirtualMachineProperties.ProvisioningState == nil {
+		return provisioningState, nil
+	}
+
+	return to.String(vm.VirtualMachineProperties.ProvisioningState), nil
+}
+
 // GetNodeNameByProviderID gets the node name by provider ID.
 func (as *availabilitySet) GetNodeNameByProviderID(providerID string) (types.NodeName, error) {
 	// NodeName is part of providerID for standard instances.
5 vendor/k8s.io/legacy-cloud-providers/azure/azure_vmsets.go generated vendored
@@ -71,11 +71,14 @@ type VMSet interface {
 	// DetachDisk detaches a vhd from host. The vhd can be identified by diskName or diskURI.
 	DetachDisk(diskName, diskURI string, nodeName types.NodeName) error
 	// GetDataDisks gets a list of data disks attached to the node.
-	GetDataDisks(nodeName types.NodeName, string azcache.AzureCacheReadType) ([]compute.DataDisk, error)
+	GetDataDisks(nodeName types.NodeName, crt azcache.AzureCacheReadType) ([]compute.DataDisk, error)
 
 	// GetPowerStatusByNodeName returns the power state of the specified node.
 	GetPowerStatusByNodeName(name string) (string, error)
 
+	// GetProvisioningStateByNodeName returns the provisioningState for the specified node.
+	GetProvisioningStateByNodeName(name string) (string, error)
+
 	// GetPrivateIPsByNodeName returns a slice of all private ips assigned to node (ipv6 and ipv4)
 	GetPrivateIPsByNodeName(name string) ([]string, error)
24 vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss.go generated vendored
@@ -243,6 +243,30 @@ func (ss *scaleSet) GetPowerStatusByNodeName(name string) (powerState string, er
 	return vmPowerStateStopped, nil
 }
 
+// GetProvisioningStateByNodeName returns the provisioningState for the specified node.
+func (ss *scaleSet) GetProvisioningStateByNodeName(name string) (provisioningState string, err error) {
+	managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, azcache.CacheReadTypeUnsafe)
+	if err != nil {
+		klog.Errorf("Failed to check isNodeManagedByAvailabilitySet: %v", err)
+		return "", err
+	}
+	if managedByAS {
+		// vm is managed by availability set.
+		return ss.availabilitySet.GetProvisioningStateByNodeName(name)
+	}
+
+	_, _, vm, err := ss.getVmssVM(name, azcache.CacheReadTypeDefault)
+	if err != nil {
+		return provisioningState, err
+	}
+
+	if vm.VirtualMachineScaleSetVMProperties == nil || vm.VirtualMachineScaleSetVMProperties.ProvisioningState == nil {
+		return provisioningState, nil
+	}
+
+	return to.String(vm.VirtualMachineScaleSetVMProperties.ProvisioningState), nil
+}
+
 // getCachedVirtualMachineByInstanceID gets scaleSetVMInfo from cache.
 // The node must belong to one of scale sets.
 func (ss *scaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID string, crt azcache.AzureCacheReadType) (*compute.VirtualMachineScaleSetVM, error) {
2 vendor/k8s.io/legacy-cloud-providers/vsphere/vsphere_util.go generated vendored
@@ -639,7 +639,7 @@ func (vs *VSphere) BuildMissingVolumeNodeMap(ctx context.Context) {
 		// Start go routines per VC-DC to check disks are attached
 		wg.Add(1)
 		go func(nodes []k8stypes.NodeName) {
-			err := vs.checkNodeDisks(ctx, nodeNames)
+			err := vs.checkNodeDisks(ctx, nodes)
 			if err != nil {
 				klog.Errorf("Failed to check disk attached for nodes: %+v. err: %+v", nodes, err)
 			}
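The vSphere fix is the classic goroutine loop-variable repair: the closure already received its batch as a parameter (nodes) but the body still read the outer variable (nodeNames). A sketch of the corrected pattern; checkAll and the batch contents are illustrative:

package main

import (
	"fmt"
	"sync"
)

// checkAll fans out one goroutine per batch. The per-iteration slice is
// passed as an argument; reading the outer loop variable inside the
// goroutine, as the old code effectively did, races with later iterations.
func checkAll(batches [][]string, check func([]string)) {
	var wg sync.WaitGroup
	for _, batch := range batches {
		wg.Add(1)
		go func(nodes []string) {
			defer wg.Done()
			check(nodes) // use only the parameter, never the loop variable
		}(batch)
	}
	wg.Wait()
}

func main() {
	checkAll([][]string{{"node-1"}, {"node-2", "node-3"}}, func(nodes []string) {
		fmt.Println("checking", nodes)
	})
}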
2 vendor/k8s.io/mount-utils/go.mod generated vendored
@@ -11,7 +11,7 @@ require (
 	gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
 	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
 	k8s.io/klog/v2 v2.9.0
-	k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9
+	k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a
 )
 
 replace k8s.io/mount-utils => ../mount-utils
4 vendor/k8s.io/mount-utils/go.sum generated vendored
@@ -28,5 +28,5 @@ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C
 k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
 k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM=
 k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
-k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9 h1:imL9YgXQ9p7xmPzHFm/vVd/cF78jad+n4wK1ABwYtMM=
-k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a h1:8dYfu/Fc9Gz2rNJKB9IQRGgQOh2clmRzNIPPY1xLY5g=
+k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
49 vendor/k8s.io/utils/clock/clock.go generated vendored
@@ -30,13 +30,36 @@ type PassiveClock interface {
 // needs to do arbitrary things based on time.
 type Clock interface {
 	PassiveClock
+	// After returns the channel of a new Timer.
+	// This method does not allow to free/GC the backing timer before it fires. Use
+	// NewTimer instead.
 	After(d time.Duration) <-chan time.Time
+	// NewTimer returns a new Timer.
 	NewTimer(d time.Duration) Timer
+	// Sleep sleeps for the provided duration d.
+	// Consider making the sleep interruptible by using 'select' on a context channel and a timer channel.
 	Sleep(d time.Duration)
+	// Tick returns the channel of a new Ticker.
+	// This method does not allow to free/GC the backing ticker. Use
+	// NewTicker from WithTicker instead.
 	Tick(d time.Duration) <-chan time.Time
 }
 
-var _ = Clock(RealClock{})
+// WithTicker allows for injecting fake or real clocks into code that
+// needs to do arbitrary things based on time.
+type WithTicker interface {
+	Clock
+	// NewTicker returns a new Ticker.
+	NewTicker(time.Duration) Ticker
+}
+
+// Ticker defines the Ticker interface.
+type Ticker interface {
+	C() <-chan time.Time
+	Stop()
+}
+
+var _ = WithTicker(RealClock{})
 
 // RealClock really calls time.Now()
 type RealClock struct{}
@@ -52,6 +75,8 @@ func (RealClock) Since(ts time.Time) time.Duration {
 }
 
 // After is the same as time.After(d).
+// This method does not allow to free/GC the backing timer before it fires. Use
+// NewTimer instead.
 func (RealClock) After(d time.Duration) <-chan time.Time {
 	return time.After(d)
 }
@@ -64,11 +89,21 @@ func (RealClock) NewTimer(d time.Duration) Timer {
 }
 
 // Tick is the same as time.Tick(d)
+// This method does not allow to free/GC the backing ticker. Use
+// NewTicker instead.
 func (RealClock) Tick(d time.Duration) <-chan time.Time {
 	return time.Tick(d)
 }
 
+// NewTicker returns a new Ticker.
+func (RealClock) NewTicker(d time.Duration) Ticker {
+	return &realTicker{
+		ticker: time.NewTicker(d),
+	}
+}
+
 // Sleep is the same as time.Sleep(d)
+// Consider making the sleep interruptible by using 'select' on a context channel and a timer channel.
 func (RealClock) Sleep(d time.Duration) {
 	time.Sleep(d)
 }
@@ -102,3 +137,15 @@ func (r *realTimer) Stop() bool {
 func (r *realTimer) Reset(d time.Duration) bool {
 	return r.timer.Reset(d)
 }
+
+type realTicker struct {
+	ticker *time.Ticker
+}
+
+func (r *realTicker) C() <-chan time.Time {
+	return r.ticker.C
+}
+
+func (r *realTicker) Stop() {
+	r.ticker.Stop()
+}
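The k8s.io/utils bump introduces WithTicker and Ticker so callers can obtain a stoppable ticker from an injectable clock instead of the leaky time.Tick. A short usage example against exactly the API added above (RealClock satisfies WithTicker per the interface assertion in the diff):

package main

import (
	"fmt"
	"time"

	"k8s.io/utils/clock"
)

// pollN takes the new WithTicker interface so tests can substitute a fake
// clock; NewTicker (unlike Tick) hands back a Ticker whose Stop releases
// the backing resources.
func pollN(c clock.WithTicker, interval time.Duration, n int) {
	t := c.NewTicker(interval)
	defer t.Stop()
	for i := 0; i < n; i++ {
		<-t.C()
		fmt.Println("tick", i)
	}
}

func main() {
	pollN(clock.RealClock{}, 50*time.Millisecond, 3)
}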
4 vendor/k8s.io/utils/lru/lru.go generated vendored
@@ -45,8 +45,8 @@ func (c *Cache) Add(key Key, value interface{}) {
 
 // Get looks up a key's value from the cache.
 func (c *Cache) Get(key Key) (value interface{}, ok bool) {
-	c.lock.RLock()
-	defer c.lock.RUnlock()
+	c.lock.Lock()
+	defer c.lock.Unlock()
 	return c.cache.Get(key)
 }
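The lru.Cache change looks like a pessimization but is a correctness fix: Get on an LRU is a write, because it moves the entry to the front of the recency list, so a read lock permits concurrent mutation of that list. A minimal sketch of why, built on container/list; this is an illustration, not the underlying implementation the package wraps:

package main

import (
	"container/list"
	"fmt"
	"sync"
)

type entry struct {
	key, value string
}

type lruCache struct {
	mu    sync.Mutex // a full mutex: Get mutates the recency list
	order *list.List
	items map[string]*list.Element
}

func (c *lruCache) Get(key string) (string, bool) {
	c.mu.Lock() // an RLock here would let two readers race on MoveToFront
	defer c.mu.Unlock()
	if el, ok := c.items[key]; ok {
		c.order.MoveToFront(el) // the hidden write inside a "read"
		return el.Value.(*entry).value, true
	}
	return "", false
}

func main() {
	c := &lruCache{order: list.New(), items: map[string]*list.Element{}}
	c.items["a"] = c.order.PushFront(&entry{"a", "1"})
	v, ok := c.Get("a")
	fmt.Println(v, ok) // 1 true
}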
18 vendor/k8s.io/utils/pointer/pointer.go generated vendored
@@ -46,6 +46,24 @@ func AllPtrFieldsNil(obj interface{}) bool {
 	return true
 }
 
+// Int returns a pointer to an int
+func Int(i int) *int {
+	return &i
+}
+
+var IntPtr = Int // for back-compat
+
+// IntDeref dereferences the int ptr and returns it if not nil, or else
+// returns def.
+func IntDeref(ptr *int, def int) int {
+	if ptr != nil {
+		return *ptr
+	}
+	return def
+}
+
+var IntPtrDerefOr = IntDeref // for back-compat
+
 // Int32 returns a pointer to an int32.
 func Int32(i int32) *int32 {
 	return &i
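The pointer package additions round out the int helpers: Int yields a pointer to a literal and IntDeref safely unwraps with a default. A usage example against the functions added above:

package main

import (
	"fmt"

	"k8s.io/utils/pointer"
)

func main() {
	replicas := pointer.Int(3) // *int without a throwaway variable
	fmt.Println(pointer.IntDeref(replicas, 1)) // 3
	var unset *int
	fmt.Println(pointer.IntDeref(unset, 1)) // 1: falls back to the default
}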
114 vendor/modules.txt vendored
@@ -899,7 +899,7 @@ github.com/opencontainers/go-digest/digestset
 github.com/opencontainers/image-spec/identity
 github.com/opencontainers/image-spec/specs-go
 github.com/opencontainers/image-spec/specs-go/v1
-# github.com/opencontainers/runc v1.0.1 => github.com/opencontainers/runc v1.0.1
+# github.com/opencontainers/runc v1.0.2 => github.com/opencontainers/runc v1.0.2
 ## explicit
 github.com/opencontainers/runc
 github.com/opencontainers/runc/contrib/cmd/recvtty
@@ -1624,7 +1624,7 @@ gopkg.in/yaml.v3
 # inet.af/tcpproxy v0.0.0-20200125044825-b6bb9b5b8252
 ## explicit
 inet.af/tcpproxy
-# k8s.io/api v0.22.1 => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.22.1-k3s1
+# k8s.io/api v0.22.2 => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.22.2-k3s1
 ## explicit
 k8s.io/api/admission/v1
 k8s.io/api/admission/v1beta1
@@ -1672,7 +1672,7 @@ k8s.io/api/scheduling/v1beta1
 k8s.io/api/storage/v1
 k8s.io/api/storage/v1alpha1
 k8s.io/api/storage/v1beta1
-# k8s.io/apiextensions-apiserver v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.22.1-k3s1
+# k8s.io/apiextensions-apiserver v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.22.2-k3s1
 k8s.io/apiextensions-apiserver/pkg/apihelpers
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install
@@ -1712,7 +1712,7 @@ k8s.io/apiextensions-apiserver/pkg/generated/openapi
 k8s.io/apiextensions-apiserver/pkg/registry/customresource
 k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor
 k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition
-# k8s.io/apimachinery v0.22.1 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.22.1-k3s1
+# k8s.io/apimachinery v0.22.2 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.22.2-k3s1
 ## explicit
 k8s.io/apimachinery/pkg/api/equality
 k8s.io/apimachinery/pkg/api/errors
@@ -1777,7 +1777,7 @@ k8s.io/apimachinery/pkg/watch
 k8s.io/apimachinery/third_party/forked/golang/json
 k8s.io/apimachinery/third_party/forked/golang/netutil
 k8s.io/apimachinery/third_party/forked/golang/reflect
-# k8s.io/apiserver v0.22.1 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.22.1-k3s1
+# k8s.io/apiserver v0.22.2 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.22.2-k3s1
 ## explicit
 k8s.io/apiserver/pkg/admission
 k8s.io/apiserver/pkg/admission/configuration
@@ -1919,11 +1919,11 @@ k8s.io/apiserver/plugin/pkg/audit/webhook
 k8s.io/apiserver/plugin/pkg/authenticator/token/oidc
 k8s.io/apiserver/plugin/pkg/authenticator/token/webhook
 k8s.io/apiserver/plugin/pkg/authorizer/webhook
-# k8s.io/cli-runtime v0.17.2 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.22.1-k3s1
+# k8s.io/cli-runtime v0.17.2 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.22.2-k3s1
 k8s.io/cli-runtime/pkg/genericclioptions
 k8s.io/cli-runtime/pkg/printers
 k8s.io/cli-runtime/pkg/resource
-# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.22.1-k3s1
+# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.22.2-k3s1
 ## explicit
 k8s.io/client-go/applyconfigurations/admissionregistration/v1
 k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1
@@ -2215,7 +2215,7 @@ k8s.io/client-go/util/jsonpath
 k8s.io/client-go/util/keyutil
 k8s.io/client-go/util/retry
 k8s.io/client-go/util/workqueue
-# k8s.io/cloud-provider v0.22.1 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.22.1-k3s1
+# k8s.io/cloud-provider v0.22.2 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.22.2-k3s1
 ## explicit
 k8s.io/cloud-provider
 k8s.io/cloud-provider/api
@@ -2237,13 +2237,13 @@ k8s.io/cloud-provider/service/helpers
 k8s.io/cloud-provider/volume
 k8s.io/cloud-provider/volume/errors
 k8s.io/cloud-provider/volume/helpers
-# k8s.io/cluster-bootstrap v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.22.1-k3s1
+# k8s.io/cluster-bootstrap v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.22.2-k3s1
 k8s.io/cluster-bootstrap/token/api
 k8s.io/cluster-bootstrap/token/jws
 k8s.io/cluster-bootstrap/token/util
 k8s.io/cluster-bootstrap/util/secrets
 k8s.io/cluster-bootstrap/util/tokens
-# k8s.io/code-generator v0.19.7 => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.22.1-k3s1
+# k8s.io/code-generator v0.19.7 => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.22.2-k3s1
 k8s.io/code-generator/cmd/client-gen/args
 k8s.io/code-generator/cmd/client-gen/generators
 k8s.io/code-generator/cmd/client-gen/generators/fake
@@ -2258,7 +2258,7 @@ k8s.io/code-generator/cmd/lister-gen/args
 k8s.io/code-generator/cmd/lister-gen/generators
 k8s.io/code-generator/pkg/namer
 k8s.io/code-generator/pkg/util
-# k8s.io/component-base v0.22.1 => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.22.1-k3s1
+# k8s.io/component-base v0.22.2 => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.22.2-k3s1
 ## explicit
 k8s.io/component-base/cli/flag
 k8s.io/component-base/cli/globalflag
@@ -2284,7 +2284,7 @@ k8s.io/component-base/term
 k8s.io/component-base/traces
 k8s.io/component-base/version
 k8s.io/component-base/version/verflag
-# k8s.io/component-helpers v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.22.1-k3s1
+# k8s.io/component-helpers v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.22.2-k3s1
 k8s.io/component-helpers/apimachinery/lease
 k8s.io/component-helpers/apps/poddisruptionbudget
 k8s.io/component-helpers/auth/rbac/reconciliation
@@ -2293,7 +2293,7 @@ k8s.io/component-helpers/node/topology
 k8s.io/component-helpers/scheduling/corev1
 k8s.io/component-helpers/scheduling/corev1/nodeaffinity
 k8s.io/component-helpers/storage/volume
-# k8s.io/controller-manager v0.22.1 => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.22.1-k3s1
+# k8s.io/controller-manager v0.22.2 => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.22.2-k3s1
 ## explicit
 k8s.io/controller-manager/app
 k8s.io/controller-manager/config
@@ -2307,11 +2307,11 @@ k8s.io/controller-manager/pkg/informerfactory
 k8s.io/controller-manager/pkg/leadermigration
 k8s.io/controller-manager/pkg/leadermigration/config
 k8s.io/controller-manager/pkg/leadermigration/options
-# k8s.io/cri-api v0.22.1 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.22.1-k3s1
+# k8s.io/cri-api v0.22.2 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.22.2-k3s1
 ## explicit
 k8s.io/cri-api/pkg/apis
 k8s.io/cri-api/pkg/apis/runtime/v1alpha2
-# k8s.io/csi-translation-lib v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.22.1-k3s1
+# k8s.io/csi-translation-lib v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.22.2-k3s1
 k8s.io/csi-translation-lib
 k8s.io/csi-translation-lib/plugins
 # k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027
@@ -2328,7 +2328,7 @@ k8s.io/klog
 # k8s.io/klog/v2 v2.9.0 => github.com/k3s-io/klog/v2 v2.9.0-k3s1
 ## explicit
 k8s.io/klog/v2
-# k8s.io/kube-aggregator v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.22.1-k3s1
+# k8s.io/kube-aggregator v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.22.2-k3s1
 k8s.io/kube-aggregator/pkg/apis/apiregistration
 k8s.io/kube-aggregator/pkg/apis/apiregistration/install
 k8s.io/kube-aggregator/pkg/apis/apiregistration/v1
@@ -2356,7 +2356,7 @@ k8s.io/kube-aggregator/pkg/controllers/status
 k8s.io/kube-aggregator/pkg/registry/apiservice
 k8s.io/kube-aggregator/pkg/registry/apiservice/etcd
 k8s.io/kube-aggregator/pkg/registry/apiservice/rest
-# k8s.io/kube-controller-manager v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.22.1-k3s1
+# k8s.io/kube-controller-manager v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.22.2-k3s1
 k8s.io/kube-controller-manager/config/v1alpha1
 # k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e
 k8s.io/kube-openapi/pkg/aggregator
@@ -2372,14 +2372,14 @@ k8s.io/kube-openapi/pkg/validation/spec
 k8s.io/kube-openapi/pkg/validation/strfmt
 k8s.io/kube-openapi/pkg/validation/strfmt/bson
 k8s.io/kube-openapi/pkg/validation/validate
-# k8s.io/kube-proxy v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.22.1-k3s1
+# k8s.io/kube-proxy v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.22.2-k3s1
 k8s.io/kube-proxy/config/v1alpha1
-# k8s.io/kube-scheduler v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.22.1-k3s1
+# k8s.io/kube-scheduler v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.22.2-k3s1
 k8s.io/kube-scheduler/config/v1
 k8s.io/kube-scheduler/config/v1beta1
 k8s.io/kube-scheduler/config/v1beta2
 k8s.io/kube-scheduler/extender/v1
-# k8s.io/kubectl v0.22.1 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.22.1-k3s1
+# k8s.io/kubectl v0.22.2 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.22.2-k3s1
 ## explicit
 k8s.io/kubectl/pkg/apps
 k8s.io/kubectl/pkg/cmd
@@ -2457,7 +2457,7 @@ k8s.io/kubectl/pkg/util/storage
 k8s.io/kubectl/pkg/util/templates
 k8s.io/kubectl/pkg/util/term
 k8s.io/kubectl/pkg/validation
-# k8s.io/kubelet v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.22.1-k3s1
+# k8s.io/kubelet v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.22.2-k3s1
 k8s.io/kubelet/config/v1alpha1
 k8s.io/kubelet/config/v1beta1
 k8s.io/kubelet/pkg/apis
@@ -2469,7 +2469,7 @@ k8s.io/kubelet/pkg/apis/pluginregistration/v1
 k8s.io/kubelet/pkg/apis/podresources/v1
 k8s.io/kubelet/pkg/apis/podresources/v1alpha1
 k8s.io/kubelet/pkg/apis/stats/v1alpha1
-# k8s.io/kubernetes v1.22.1 => github.com/k3s-io/kubernetes v1.22.1-k3s1
+# k8s.io/kubernetes v1.22.2 => github.com/k3s-io/kubernetes v1.22.2-k3s1
 ## explicit
 k8s.io/kubernetes/cmd/kube-apiserver/app
 k8s.io/kubernetes/cmd/kube-apiserver/app/options
@@ -3201,7 +3201,7 @@ k8s.io/kubernetes/third_party/forked/gonum/graph
 k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear
 k8s.io/kubernetes/third_party/forked/gonum/graph/simple
 k8s.io/kubernetes/third_party/forked/gonum/graph/traverse
-# k8s.io/legacy-cloud-providers v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.22.1-k3s1
+# k8s.io/legacy-cloud-providers v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.22.2-k3s1
 k8s.io/legacy-cloud-providers/aws
 k8s.io/legacy-cloud-providers/azure
 k8s.io/legacy-cloud-providers/azure/auth
@@ -3244,7 +3244,7 @@ k8s.io/legacy-cloud-providers/openstack
 k8s.io/legacy-cloud-providers/vsphere
 k8s.io/legacy-cloud-providers/vsphere/vclib
 k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers
-# k8s.io/metrics v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.22.1-k3s1
+# k8s.io/metrics v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.22.2-k3s1
 k8s.io/metrics/pkg/apis/custom_metrics
 k8s.io/metrics/pkg/apis/custom_metrics/v1beta1
 k8s.io/metrics/pkg/apis/custom_metrics/v1beta2
@@ -3260,9 +3260,9 @@ k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1
 k8s.io/metrics/pkg/client/custom_metrics
 k8s.io/metrics/pkg/client/custom_metrics/scheme
 k8s.io/metrics/pkg/client/external_metrics
-# k8s.io/mount-utils v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.22.1-k3s1
+# k8s.io/mount-utils v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.22.2-k3s1
 k8s.io/mount-utils
-# k8s.io/pod-security-admission v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/pod-security-admission v1.22.1-k3s1
+# k8s.io/pod-security-admission v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/pod-security-admission v1.22.2-k3s1
 k8s.io/pod-security-admission/admission
 k8s.io/pod-security-admission/admission/api
 k8s.io/pod-security-admission/admission/api/load
@@ -3272,7 +3272,7 @@ k8s.io/pod-security-admission/admission/api/validation
 k8s.io/pod-security-admission/api
 k8s.io/pod-security-admission/metrics
 k8s.io/pod-security-admission/policy
-# k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9
+# k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a
 ## explicit
 k8s.io/utils/buffer
 k8s.io/utils/clock
@@ -3403,7 +3403,7 @@ sigs.k8s.io/yaml
 # github.com/juju/errors => github.com/k3s-io/nocode v0.0.0-20200630202308-cb097102c09f
 # github.com/kubernetes-sigs/cri-tools => github.com/k3s-io/cri-tools v1.21.0-k3s1
 # github.com/matryer/moq => github.com/rancher/moq v0.0.0-20190404221404-ee5226d43009
-# github.com/opencontainers/runc => github.com/opencontainers/runc v1.0.1
+# github.com/opencontainers/runc => github.com/opencontainers/runc v1.0.2
 # github.com/opencontainers/runtime-spec => github.com/opencontainers/runtime-spec v1.0.3-0.20210316141917-a8c4a9ee0f6b
 # github.com/rancher/k3s/pkg/data => ./pkg/data
 # github.com/rancher/wrangler => github.com/rancher/wrangler v0.8.5
@@ -3417,35 +3417,35 @@ sigs.k8s.io/yaml
 # google.golang.org/genproto => google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63
 # google.golang.org/grpc => google.golang.org/grpc v1.38.0
 # gopkg.in/square/go-jose.v2 => gopkg.in/square/go-jose.v2 v2.2.2
-# k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.22.1-k3s1
-# k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.22.1-k3s1
-# k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.22.1-k3s1
-# k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.22.1-k3s1
-# k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.22.1-k3s1
-# k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.22.1-k3s1
-# k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.22.1-k3s1
-# k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.22.1-k3s1
-# k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.22.1-k3s1
-# k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.22.1-k3s1
-# k8s.io/component-helpers => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.22.1-k3s1
-# k8s.io/controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.22.1-k3s1
-# k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.22.1-k3s1
-# k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.22.1-k3s1
+# k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.22.2-k3s1
+# k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.22.2-k3s1
+# k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.22.2-k3s1
+# k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.22.2-k3s1
+# k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.22.2-k3s1
+# k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.22.2-k3s1
+# k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.22.2-k3s1
+# k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.22.2-k3s1
+# k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.22.2-k3s1
+# k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.22.2-k3s1
+# k8s.io/component-helpers => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.22.2-k3s1
+# k8s.io/controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.22.2-k3s1
+# k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.22.2-k3s1
+# k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.22.2-k3s1
 # k8s.io/klog => github.com/k3s-io/klog v1.0.0-k3s1
 # k8s.io/klog/v2 => github.com/k3s-io/klog/v2 v2.9.0-k3s1
-# k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.22.1-k3s1
-# k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.22.1-k3s1
-# k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.22.1-k3s1
-# k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.22.1-k3s1
-# k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.22.1-k3s1
-# k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.22.1-k3s1
-# k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.22.1-k3s1
-# k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.22.1-k3s1
-# k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.22.1-k3s1
-# k8s.io/mount-utils => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.22.1-k3s1
-# k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.22.1-k3s1
-# k8s.io/pod-security-admission => github.com/k3s-io/kubernetes/staging/src/k8s.io/pod-security-admission v1.22.1-k3s1
-# k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.22.1-k3s1
-# k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.22.1-k3s1
-# k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.22.1-k3s1
+# k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.22.2-k3s1
+# k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.22.2-k3s1
+# k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.22.2-k3s1
+# k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.22.2-k3s1
+# k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.22.2-k3s1
+# k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.22.2-k3s1
+# k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.22.2-k3s1
+# k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.22.2-k3s1
+# k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.22.2-k3s1
+# k8s.io/mount-utils => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.22.2-k3s1
+# k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.22.2-k3s1
+# k8s.io/pod-security-admission => github.com/k3s-io/kubernetes/staging/src/k8s.io/pod-security-admission v1.22.2-k3s1
+# k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.22.2-k3s1
+# k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.22.2-k3s1
+# k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.22.2-k3s1
 # mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7