Mirror of https://github.com/k3s-io/k3s.git (synced 2024-06-07 19:41:36 +00:00)

[master] Bump Kubernetes to v1.22.4-k3s1 (#4536)

* Update to v1.22.4

Signed-off-by: Chris Kim <oats87g@gmail.com>

This commit is contained in:
parent 03485632ea
commit 65110a4eec
go.mod (76 lines changed)
@@ -38,37 +38,37 @@ replace (
 	google.golang.org/genproto => google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63
 	google.golang.org/grpc => google.golang.org/grpc v1.38.0
 	gopkg.in/square/go-jose.v2 => gopkg.in/square/go-jose.v2 v2.2.2
-	k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.22.3-k3s1
-	k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.22.3-k3s1
-	k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.22.3-k3s1
-	k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.22.3-k3s1
-	k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.22.3-k3s1
-	k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.22.3-k3s1
-	k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.22.3-k3s1
-	k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.22.3-k3s1
-	k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.22.3-k3s1
-	k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.22.3-k3s1
-	k8s.io/component-helpers => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.22.3-k3s1
-	k8s.io/controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.22.3-k3s1
-	k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.22.3-k3s1
-	k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.22.3-k3s1
+	k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.22.4-k3s1
+	k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.22.4-k3s1
+	k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.22.4-k3s1
+	k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.22.4-k3s1
+	k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.22.4-k3s1
+	k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.22.4-k3s1
+	k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.22.4-k3s1
+	k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.22.4-k3s1
+	k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.22.4-k3s1
+	k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.22.4-k3s1
+	k8s.io/component-helpers => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.22.4-k3s1
+	k8s.io/controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.22.4-k3s1
+	k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.22.4-k3s1
+	k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.22.4-k3s1
 	k8s.io/klog => github.com/k3s-io/klog v1.0.0-k3s2 // k3s-release-1.x
 	k8s.io/klog/v2 => github.com/k3s-io/klog/v2 v2.9.0-k3s2 // k3s-main
-	k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.22.3-k3s1
-	k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.22.3-k3s1
-	k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.22.3-k3s1
-	k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.22.3-k3s1
-	k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.22.3-k3s1
-	k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.22.3-k3s1
-	k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.22.3-k3s1
-	k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.22.3-k3s1
-	k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.22.3-k3s1
-	k8s.io/mount-utils => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.22.3-k3s1
-	k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.22.3-k3s1
-	k8s.io/pod-security-admission => github.com/k3s-io/kubernetes/staging/src/k8s.io/pod-security-admission v1.22.3-k3s1
-	k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.22.3-k3s1
-	k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.22.3-k3s1
-	k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.22.3-k3s1
+	k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.22.4-k3s1
+	k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.22.4-k3s1
+	k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.22.4-k3s1
+	k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.22.4-k3s1
+	k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.22.4-k3s1
+	k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.22.4-k3s1
+	k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.22.4-k3s1
+	k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.22.4-k3s1
+	k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.22.4-k3s1
+	k8s.io/mount-utils => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.22.4-k3s1
+	k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.22.4-k3s1
+	k8s.io/pod-security-admission => github.com/k3s-io/kubernetes/staging/src/k8s.io/pod-security-admission v1.22.4-k3s1
+	k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.22.4-k3s1
+	k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.22.4-k3s1
+	k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.22.4-k3s1
 	mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7
 )
@@ -127,18 +127,18 @@ require (
 	google.golang.org/grpc v1.40.0
 	gopkg.in/yaml.v2 v2.4.0
 	inet.af/tcpproxy v0.0.0-20200125044825-b6bb9b5b8252
-	k8s.io/api v0.22.3
-	k8s.io/apimachinery v0.22.3
-	k8s.io/apiserver v0.22.3
+	k8s.io/api v0.22.4
+	k8s.io/apimachinery v0.22.4
+	k8s.io/apiserver v0.22.4
 	k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible
-	k8s.io/cloud-provider v0.22.3
-	k8s.io/component-base v0.22.3
-	k8s.io/controller-manager v0.22.3 // indirect
-	k8s.io/cri-api v0.22.3
+	k8s.io/cloud-provider v0.22.4
+	k8s.io/component-base v0.22.4
+	k8s.io/controller-manager v0.22.4 // indirect
+	k8s.io/cri-api v0.22.4
 	k8s.io/klog v1.0.0
 	k8s.io/klog/v2 v2.9.0
-	k8s.io/kubectl v0.22.3
-	k8s.io/kubernetes v1.22.3
+	k8s.io/kubectl v0.22.4
+	k8s.io/kubernetes v1.22.4
 	k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a
 	sigs.k8s.io/yaml v1.2.0
 )
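The replace block above is how k3s pins every k8s.io staging module to its own fork at a single tag, so a bump like this one has to move all of them in lockstep. As a quick sanity check (not part of this commit, and purely illustrative), a small Go program using the golang.org/x/mod/modfile API can list the effective replacements:

package main

import (
	"fmt"
	"os"

	"golang.org/x/mod/modfile"
)

func main() {
	data, err := os.ReadFile("go.mod")
	if err != nil {
		panic(err)
	}
	f, err := modfile.Parse("go.mod", data, nil)
	if err != nil {
		panic(err)
	}
	// Print each replace directive; after this bump, every k8s.io
	// staging module should report v1.22.4-k3s1.
	for _, r := range f.Replace {
		fmt.Printf("%s => %s %s\n", r.Old.Path, r.New.Path, r.New.Version)
	}
}

Run from the repository root, this makes a half-migrated bump (one module left at v1.22.3-k3s1) easy to spot.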
go.sum (105 lines changed)
@@ -578,57 +578,57 @@ github.com/k3s-io/klog v1.0.0-k3s2 h1:yyvD2bQbxG7m85/pvNctLX2bUDmva5kOBvuZ77tTGB
 github.com/k3s-io/klog v1.0.0-k3s2/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
 github.com/k3s-io/klog/v2 v2.9.0-k3s2 h1:8Dzu3wGPFMo1mPEobSEpkHWH+HXqgFXp8R7FbcdgE8k=
 github.com/k3s-io/klog/v2 v2.9.0-k3s2/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
-github.com/k3s-io/kubernetes v1.22.3-k3s1 h1:nWDZxiuxeIehEeA50BQpND+IiHdSpuF1aubACpRWxSM=
-github.com/k3s-io/kubernetes v1.22.3-k3s1/go.mod h1:Snea7fgIObGgHmLbUJ3OgjGEr5bjj16iEdp5oHS6eS8=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.22.3-k3s1 h1:jetu7IIafofabAXIXQr1bO0Eyb7K877B3Jmsst0E9D4=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.22.3-k3s1/go.mod h1:IpPnJRE5t3olVaut5p67N16cZkWwwU5KVFM35xCKyxM=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.22.3-k3s1 h1:QiVDRF9YqPgCQU9RmW/5mDdGEqkYRDzndBfU5kHk2pI=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.22.3-k3s1/go.mod h1:QmWu0gjtyJOtpuSADyICOuml8CoD/poBm+IV/jrT4D8=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.22.3-k3s1 h1:796f9bLpYzAYCKIbBP7Q9us0+IE8WiXVAWjZRnrHkOs=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.22.3-k3s1/go.mod h1:J+pXqtTbzz3Sg3WGrq6bFtKQ2D9xbnRY3VdVaX0lJjw=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.22.3-k3s1 h1:IYXHTyXPfhaV/MvxjDR5ys1+RV0LrWTdqGmgakQJPjI=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.22.3-k3s1/go.mod h1:LGIW+JHWnonf/5jG9JRDEXxsZsbTKcC/4aMljJaz8bA=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.22.3-k3s1 h1:uT05wmj7oH476NGd2KEAeLG4RISecX4PfbqTFdolu2Q=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.22.3-k3s1/go.mod h1:/G/EQBeZVcKQ6+aqZhYUZUB7zh13XkshYpnX4m75hGM=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.22.3-k3s1 h1:YSfqyRuJB5TvyWXT98f7gfYfh9ge+uAthJr+OhlSSPQ=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.22.3-k3s1/go.mod h1:O2pnHGDR/pgPeefYRaZj7oAOfA3WukxKhGFzkGhyIME=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.22.3-k3s1 h1:yfJKnbMucvZtCvjGA+qJIPLXN6IagSYAPqjAb/G7REU=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.22.3-k3s1/go.mod h1:YfjUcxHPiB9x/eHUrBtefZ61AuHGSDXfyXtsLS5UlMQ=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.22.3-k3s1 h1:nITd9SSvu9O78XbLnfYLi+Y3RV8skMzu4i0vCXPL1VU=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.22.3-k3s1/go.mod h1:ppZJmhTukDTa5g/F0ksVMLM0Owbi9GeKhzuTXAVVJig=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.22.3-k3s1 h1:0G1ww23xzEJW9FeEJvAqr+Uj+Ng/LHFmN7BqyTHm12I=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.22.3-k3s1/go.mod h1:sUUmwtmwhRVMXuCc1xDCW0VAqgY6LwabgtXcWxMBL8Y=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.22.3-k3s1 h1:hMxyUWj7gPXr3iKZl3hlPZalHStacU/TtK4r+lbGhvs=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.22.3-k3s1/go.mod h1:cn9EB9A1wujtKWsHqB9lkYq8FL4dUuftmiqNyXIQEmE=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.22.3-k3s1 h1:dHrlX1219tRM+hb7WEBhOm9R+XdJu+mT/4fbri3K8NE=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.22.3-k3s1/go.mod h1:9Bx6HezI9sKzn5Boasw7vMT8FRgcXsExOoT87Wzdls4=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.22.3-k3s1 h1:dXZ4C0V68+kFo2iWjb1vElqPKE9sTYZoPl/vOlvOVxs=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.22.3-k3s1/go.mod h1:aPin+82yKPEirDGBtNS/4fcc3a1QVOqdt6zzxOlrfc8=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.22.3-k3s1 h1:k98cXpAcj1xykoA9HTwgLi1GJSk6eJvExCBD6eDGDcU=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.22.3-k3s1/go.mod h1:2XpB3zC24SSO5mhWTzWwBHrO5rO19ncH1bdqS+VuQsI=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.22.3-k3s1 h1:Kp4+RXXVcGm8dfoEF+5m/A9OHpgmEWO0RRfGiUGD1vk=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.22.3-k3s1/go.mod h1:B1gPUSbK2PVSnkxCgw/fmDckzQU6UCuyl670XFbEw6Q=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.22.3-k3s1 h1:XLwfhXd/qHIRY+YsULO+UCGfnmrv0qEMAEvmaY3XNas=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.22.3-k3s1/go.mod h1:LyGGFXi1A101JZQjBNy4RgVgAwtQ5UhRy2KfX/STctk=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.22.3-k3s1 h1:wa02xS5pSBenTik5SX6aFFPy8IpJqppjwJi8Q0oCFAI=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.22.3-k3s1/go.mod h1:46iKO45TZat/zvPyqe8TjLLrTS/U/nGB92Ft63PEPF0=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.22.3-k3s1 h1:EiVJjtwaa7caTbTkUg05IqbBsduUkXB5xU4LHQ7IxvU=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.22.3-k3s1/go.mod h1:6mEp02ABsuOeeBuUrrol78v9LYysX7Z8CZOMFlkPOOI=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.22.3-k3s1 h1:28g7PH3lzZgD0n+4HATFdTcFhn44B9X2nwiNM7ruoJY=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.22.3-k3s1/go.mod h1:xZnfOrGTta6rB9IWNKl82yzWKpMSUXVmyGHRilQ9kzM=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.22.3-k3s1 h1:dZ6Pe1+R+whgwU/DyL91R4WguTVcZn2zrjGVgIeFmaE=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.22.3-k3s1/go.mod h1:ekgaQk/DhXcVFh7pMs0S8QU7uSGhC5XXcmX2MfMz+PQ=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.22.3-k3s1 h1:/5GctQAwGMpx5b/cN2ePx+uO5758BWpZNjsy3PmtDYw=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.22.3-k3s1/go.mod h1:ykhJBi1kXSUeYSTzHe2F6A8nDAfF2jjClsagmgX96vk=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.22.3-k3s1 h1:CVW77IGQ7XEylg39HA0rFjzihBTJX/DtMCl4pw16iYs=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.22.3-k3s1/go.mod h1:X8EaUY5K2IM/62KAMuHGuHyOhsJwvsoRwdvsyWjm++g=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.22.3-k3s1 h1:95hSpRrUV0U1JtwPyR4b3df9SSpDqkVkJOh7gOQ41Cs=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.22.3-k3s1/go.mod h1:I5RbQZ+gj12KSgWzMyHaE0hudGajvT/Nc5jRE/WMJnI=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.22.3-k3s1 h1:lcDSUWyM0BMCu/GgPWWco0H8ogTdFvJg3UVwahwZxOk=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.22.3-k3s1/go.mod h1:7UvmmOyjKl2RW0tgpT4l1z7dxVV4TMnAAlSN95cpUeM=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/pod-security-admission v1.22.3-k3s1 h1:ccyWlx1MSPT6fX0JSLoi8P3lbBxLqjvFoK5L1lv6sCw=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/pod-security-admission v1.22.3-k3s1/go.mod h1:NQr24M7bou27uJhKTCgiz8L/BbJm2G9ZjUQOmO1ZkDE=
-github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.22.3-k3s1/go.mod h1:mpLHq04wAiOpaWE4BI8ArSQp82DIgRirioGL6CryJDg=
+github.com/k3s-io/kubernetes v1.22.4-k3s1 h1:a7a87QH6W5xSeYNjjdHS8AdMGrj7uauO6BuPJoeTXiI=
+github.com/k3s-io/kubernetes v1.22.4-k3s1/go.mod h1:cMy6DFG4E+/jxMgxw1aWMwZqvI1AueV3HCcG9S7QNIk=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.22.4-k3s1 h1:AHoGUNjhbQX75KWL0TuktFG2NbmIVqo0GhukN8XmP0s=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.22.4-k3s1/go.mod h1:IpPnJRE5t3olVaut5p67N16cZkWwwU5KVFM35xCKyxM=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.22.4-k3s1 h1:VfZnYgB2GnJI9wg2HTh4SykSMhct5zKIFvmP5EiCxfg=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.22.4-k3s1/go.mod h1:zmJC+9mioTWRSt+xhOuRY3B87bEz3EtGeCd91gYRSi8=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.22.4-k3s1 h1:YsYOB9ld0d3qdE0/g8Tk8tgQqkok/b01NX2FHSXiRmU=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.22.4-k3s1/go.mod h1:HSS1wcqIAF5ey2HsA1bh4U1SZzae/8FV8VZAwiHK8PU=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.22.4-k3s1 h1:XQbivk62qMy7Q8rklMkHRF+eLjJ8u3Cqc0EeZ6mmTM4=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.22.4-k3s1/go.mod h1:cVrnyIo0O8rjEuwU3zDrcNB0+btiewJZILVn7l04ndA=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.22.4-k3s1 h1:nPGXxWbLI1mzqL6YRBTUH83oKYbiGqU50M2xLcOEfk4=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.22.4-k3s1/go.mod h1:lfgLhMRSK39X7vRKyVDIHEUngwllg9H4TCGWbC6sCeA=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.22.4-k3s1 h1:gKUSdJoXU7FSpB5F8es62ZPHXowCyvFEfLuMOSUAtWs=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.22.4-k3s1/go.mod h1:O2pnHGDR/pgPeefYRaZj7oAOfA3WukxKhGFzkGhyIME=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.22.4-k3s1 h1:sJseLtOpIKYt3bZeCK108pB1kCV1hGuzxIbZWfkG8yE=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.22.4-k3s1/go.mod h1:YfjUcxHPiB9x/eHUrBtefZ61AuHGSDXfyXtsLS5UlMQ=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.22.4-k3s1 h1:gNo4ngjVFAGE4pcNLueW0INcRn/DKBxpFenJ1PzPA0A=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.22.4-k3s1/go.mod h1:ppZJmhTukDTa5g/F0ksVMLM0Owbi9GeKhzuTXAVVJig=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.22.4-k3s1 h1:m6zmiTpMnPvOIP11+wjAWsnsSquQlRnwIl3Hx1dum8Y=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.22.4-k3s1/go.mod h1:+R4WCWo1TVAhFPpi7KfK5xSMMq+OUxpe0SXM8o+Y1PM=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.22.4-k3s1 h1:XQpn74M9JoLO19rcOY85fN0difs4Jjwsq4j+ocS8I8o=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.22.4-k3s1/go.mod h1:cn9EB9A1wujtKWsHqB9lkYq8FL4dUuftmiqNyXIQEmE=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.22.4-k3s1 h1:ArlHlCYeTu/5CCZIAioe+aeK8YUnW754fdaiJnyIuf4=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.22.4-k3s1/go.mod h1:9Bx6HezI9sKzn5Boasw7vMT8FRgcXsExOoT87Wzdls4=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.22.4-k3s1 h1:cw8+4RCgkyQWD/3ERYOFCQ3gJAX90Lfa4MFsbOfn4S8=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.22.4-k3s1/go.mod h1:aPin+82yKPEirDGBtNS/4fcc3a1QVOqdt6zzxOlrfc8=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.22.4-k3s1 h1:xW01nw4ClRO+drw5s4YR9pLoy3SPm1CH6+ZJeilYok0=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.22.4-k3s1/go.mod h1:2XpB3zC24SSO5mhWTzWwBHrO5rO19ncH1bdqS+VuQsI=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.22.4-k3s1 h1:9uNa6fPpg+rpXa4jhmmwBs5QquUdJT8tN1VxEjC0x9w=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.22.4-k3s1/go.mod h1:B1gPUSbK2PVSnkxCgw/fmDckzQU6UCuyl670XFbEw6Q=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.22.4-k3s1 h1:SrNTndYl3oDd76FpfQetCAvb3KS/Q7LYRR4n1sUhxLQ=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.22.4-k3s1/go.mod h1:E98zh+3BdGMr4K4vdHQ++RDyPm29QUkHh/28KlKUS2M=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.22.4-k3s1 h1:o9eo80IgaxiWzXPCFo7azy+MJ1t936vTEOTG2glTYSs=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.22.4-k3s1/go.mod h1:46iKO45TZat/zvPyqe8TjLLrTS/U/nGB92Ft63PEPF0=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.22.4-k3s1 h1:xRSYSJ1op+xFXB4pTpQxm6TPUsVih26bixaL7t2rBJ8=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.22.4-k3s1/go.mod h1:6mEp02ABsuOeeBuUrrol78v9LYysX7Z8CZOMFlkPOOI=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.22.4-k3s1 h1:kDo4emfEJkLpeTLvAqjofwaTNP8+KM3nHUW46b/pY1s=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.22.4-k3s1/go.mod h1:xZnfOrGTta6rB9IWNKl82yzWKpMSUXVmyGHRilQ9kzM=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.22.4-k3s1 h1:AWCYJ5YiP2/x2qeweeoSPUg0qF+FQoW9xBAE+b/xH44=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.22.4-k3s1/go.mod h1:tQ6sES/uhan72qymDDma4H0wrrF0gcZIYV8cczR20es=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.22.4-k3s1 h1:fS1sfV8JD4ksV1uwruH6SOGk2YA9FmAwggmBoNvHbjE=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.22.4-k3s1/go.mod h1:ykhJBi1kXSUeYSTzHe2F6A8nDAfF2jjClsagmgX96vk=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.22.4-k3s1 h1:BZNjFNRzgQRJnHxA35Fz2+ag7z+ShhvahyPrxQ7Tq9c=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.22.4-k3s1/go.mod h1:X8EaUY5K2IM/62KAMuHGuHyOhsJwvsoRwdvsyWjm++g=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.22.4-k3s1 h1:7hhRJgwEHGOLKtIdRFAqS+L2eWpehkv1vARzt9izq3g=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.22.4-k3s1/go.mod h1:I5RbQZ+gj12KSgWzMyHaE0hudGajvT/Nc5jRE/WMJnI=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.22.4-k3s1 h1:3n9EXeC3/+Z6uf92gyQ5DoNcEsaVjiNiCLrauUE8TSY=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.22.4-k3s1/go.mod h1:7UvmmOyjKl2RW0tgpT4l1z7dxVV4TMnAAlSN95cpUeM=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/pod-security-admission v1.22.4-k3s1 h1:BYAhog5yKELWQnADuEOjgmhv95ACupwuWMnH3Reoq7A=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/pod-security-admission v1.22.4-k3s1/go.mod h1:NQr24M7bou27uJhKTCgiz8L/BbJm2G9ZjUQOmO1ZkDE=
+github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.22.4-k3s1/go.mod h1:UxJ/6uQrndnWNUMK/16LYsFZK98WTolizwpjcIINGCg=
 github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw=
 github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
 github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
@@ -1305,8 +1305,9 @@ k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8
 k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
 k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027 h1:Uusb3oh8XcdzDF/ndlI4ToKTYVlkCSJP39SRY2mfRAw=
 k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
-k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e h1:KLHHjkdQFomZy8+06csTWZ0m1343QqxZhR2LJ1OxCYM=
 k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
+k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c h1:jvamsI1tn9V0S8jicyX82qaFC0H/NKxv2e5mbqsgR80=
+k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
 k8s.io/system-validators v1.5.0/go.mod h1:bPldcLgkIUK22ALflnsXk8pvkTEndYdNuaHH6gRrl0Q=
 k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
 k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
|
26
vendor/k8s.io/apiserver/pkg/server/httplog/httplog.go
generated
vendored
26
vendor/k8s.io/apiserver/pkg/server/httplog/httplog.go
generated
vendored
@@ -24,6 +24,7 @@ import (
 	"net/http"
 	"runtime"
 	"strings"
+	"sync"
 	"time"

 	"k8s.io/apiserver/pkg/endpoints/metrics"
@@ -54,13 +55,17 @@ type respLogger struct {
 	statusRecorded bool
 	status         int
 	statusStack    string
-	addedInfo      strings.Builder
-	startTime      time.Time
+	// mutex is used when accessing addedInfo
+	// It can be modified by other goroutine when logging happens (in case of request timeout)
+	mutex     sync.Mutex
+	addedInfo strings.Builder
+	startTime time.Time

 	captureErrorOutput bool

-	req *http.Request
-	w   http.ResponseWriter
+	req       *http.Request
+	userAgent string
+	w         http.ResponseWriter

 	logStacktracePred StacktracePred
 }
@@ -121,6 +126,7 @@ func newLoggedWithStartTime(req *http.Request, w http.ResponseWriter, startTime
 	return &respLogger{
 		startTime:         startTime,
 		req:               req,
+		userAgent:         req.UserAgent(),
 		w:                 w,
 		logStacktracePred: DefaultStacktracePred,
 	}
@@ -171,6 +177,8 @@ func StatusIsNot(statuses ...int) StacktracePred {

 // Addf adds additional data to be logged with this request.
 func (rl *respLogger) Addf(format string, data ...interface{}) {
+	rl.mutex.Lock()
+	defer rl.mutex.Unlock()
 	rl.addedInfo.WriteString("\n")
 	rl.addedInfo.WriteString(fmt.Sprintf(format, data...))
 }
@@ -200,10 +208,18 @@ func (rl *respLogger) Log() {
 		"verb", verb,
 		"URI", rl.req.RequestURI,
 		"latency", latency,
-		"userAgent", rl.req.UserAgent(),
+		// We can't get UserAgent from rl.req.UserAgent() here as it accesses headers map,
+		// which can be modified in another goroutine when apiserver request times out.
+		// For example authentication filter modifies request's headers,
+		// This can cause apiserver to crash with unrecoverable fatal error.
+		// More info about concurrent read and write for maps: https://golang.org/doc/go1.6#runtime
+		"userAgent", rl.userAgent,
 		"audit-ID", auditID,
 		"srcIP", rl.req.RemoteAddr,
 	}
+	// Lock for accessing addedInfo
+	rl.mutex.Lock()
+	defer rl.mutex.Unlock()

 	if rl.hijacked {
 		keysAndValues = append(keysAndValues, "hijacked", true)
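The httplog change above fixes a data race: req.UserAgent() reads the request's header map, which the timeout filter can mutate from another goroutine, and addedInfo is a strings.Builder shared across goroutines. A minimal, self-contained sketch of the same pattern (hypothetical names, not the vendored code): snapshot request-derived values once at construction, and guard the shared builder with a mutex.

package httplogsketch

import (
	"fmt"
	"net/http"
	"strings"
	"sync"
)

// respLoggerSketch copies request-derived values at construction time
// and serializes writes to the shared builder, mirroring the fix above.
type respLoggerSketch struct {
	mu        sync.Mutex
	addedInfo strings.Builder
	userAgent string // snapshot taken before any concurrent writer touches headers
}

func newRespLoggerSketch(req *http.Request) *respLoggerSketch {
	return &respLoggerSketch{userAgent: req.UserAgent()}
}

// Addf is safe to call from multiple goroutines, e.g. after a timeout.
func (rl *respLoggerSketch) Addf(format string, data ...interface{}) {
	rl.mu.Lock()
	defer rl.mu.Unlock()
	rl.addedInfo.WriteString("\n")
	rl.addedInfo.WriteString(fmt.Sprintf(format, data...))
}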
vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_context.go (generated, vendored; 11 lines changed)
@@ -52,6 +52,17 @@ func WatchInitialized(ctx context.Context) {
 	}
 }

+// RequestDelegated informs the priority and fairness dispatcher that
+// a given request has been delegated to an aggregated API
+// server. No-op when priority and fairness is disabled.
+func RequestDelegated(ctx context.Context) {
+	// The watch initialization signal doesn't traverse request
+	// boundaries, so we generously fire it as soon as we know
+	// that the request won't be serviced locally. Safe to call
+	// for non-watch requests.
+	WatchInitialized(ctx)
+}
+
 // InitializationSignal is an interface that allows sending and handling
 // initialization signals.
 type InitializationSignal interface {
vendor/k8s.io/client-go/pkg/version/base.go (generated, vendored; 6 lines changed)
@@ -3,8 +3,8 @@ package version
 var (
 	gitMajor = "1"
 	gitMinor = "22"
-	gitVersion = "v1.22.3-k3s1"
-	gitCommit = "5d8c744cb54fc54d261b2f14e8464049aad7e32c"
+	gitVersion = "v1.22.4-k3s1"
+	gitCommit = "00226be68bd6b70f8fca252a1027fc0a35d070b8"
 	gitTreeState = "clean"
-	buildDate = "2021-10-28T15:23:07Z"
+	buildDate = "2021-11-17T22:30:06Z"
 )
vendor/k8s.io/cloud-provider/controllers/service/controller.go (generated, vendored; 11 lines changed)
@@ -55,6 +55,9 @@ const (
 	// should be changed appropriately.
 	minRetryDelay = 5 * time.Second
 	maxRetryDelay = 300 * time.Second
+	// ToBeDeletedTaint is a taint used by the Cluster Autoscaler before marking a node for deletion. Defined in
+	// https://github.com/kubernetes/autoscaler/blob/e80ab518340f88f364fe3ef063f8303755125971/cluster-autoscaler/utils/deletetaint/delete.go#L36
+	ToBeDeletedTaint = "ToBeDeletedByClusterAutoscaler"
 )

 type cachedService struct {
@@ -671,6 +674,14 @@ func (s *Controller) getNodeConditionPredicate() NodeConditionPredicate {
 			return false
 		}

+		// Remove nodes that are about to be deleted by the cluster autoscaler.
+		for _, taint := range node.Spec.Taints {
+			if taint.Key == ToBeDeletedTaint {
+				klog.V(4).Infof("Ignoring node %v with autoscaler taint %+v", node.Name, taint)
+				return false
+			}
+		}
+
 		// If we have no info, don't accept
 		if len(node.Status.Conditions) == 0 {
 			return false
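With this predicate change, the service controller stops sending load-balancer traffic to nodes the autoscaler has already condemned. A standalone sketch of the same check (hypothetical helper using k8s.io/api types; not the vendored function itself):

package nodefiltersketch

import v1 "k8s.io/api/core/v1"

const toBeDeletedTaint = "ToBeDeletedByClusterAutoscaler"

// usableForLoadBalancing reports whether a node should keep receiving
// load-balancer traffic; nodes tainted for deletion are excluded.
func usableForLoadBalancing(node *v1.Node) bool {
	for _, taint := range node.Spec.Taints {
		if taint.Key == toBeDeletedTaint {
			return false
		}
	}
	return true
}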
vendor/k8s.io/cloud-provider/go.sum (generated, vendored; 4 lines changed)
@@ -780,8 +780,8 @@ k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8
 k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
 k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM=
 k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
-k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e h1:KLHHjkdQFomZy8+06csTWZ0m1343QqxZhR2LJ1OxCYM=
-k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
+k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c h1:jvamsI1tn9V0S8jicyX82qaFC0H/NKxv2e5mbqsgR80=
+k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
 k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a h1:8dYfu/Fc9Gz2rNJKB9IQRGgQOh2clmRzNIPPY1xLY5g=
 k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
vendor/k8s.io/component-base/metrics/options.go (generated, vendored; 4 lines changed)
@@ -58,8 +58,8 @@ func (o *Options) Validate() []error {

 // AddFlags adds flags for exposing component metrics.
 func (o *Options) AddFlags(fs *pflag.FlagSet) {
-	if o != nil {
-		o = NewOptions()
+	if o == nil {
+		return
 	}
 	fs.StringVar(&o.ShowHiddenMetricsForVersion, "show-hidden-metrics-for-version", o.ShowHiddenMetricsForVersion,
 		"The previous version for which you want to show hidden metrics. "+
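This one-line fix matters twice over: the old guard replaced a caller's populated Options with fresh defaults whenever o was non-nil, and still dereferenced o when it was nil. Since Go permits calling a pointer method on a nil receiver, the guard-and-return shape is the idiomatic repair. A sketch under that assumption, using the standard flag package in place of pflag:

package optionssketch

import "flag"

type Options struct {
	ShowHiddenMetricsForVersion string
}

// AddFlags registers nothing when called on a nil receiver (legal in Go)
// and otherwise keeps whatever values the caller already set.
func (o *Options) AddFlags(fs *flag.FlagSet) {
	if o == nil {
		return
	}
	fs.StringVar(&o.ShowHiddenMetricsForVersion, "show-hidden-metrics-for-version",
		o.ShowHiddenMetricsForVersion, "The previous version for which you want to show hidden metrics.")
}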
vendor/k8s.io/component-base/version/base.go (generated, vendored; 6 lines changed)
@@ -3,8 +3,8 @@ package version
 var (
 	gitMajor = "1"
 	gitMinor = "22"
-	gitVersion = "v1.22.3-k3s1"
-	gitCommit = "5d8c744cb54fc54d261b2f14e8464049aad7e32c"
+	gitVersion = "v1.22.4-k3s1"
+	gitCommit = "00226be68bd6b70f8fca252a1027fc0a35d070b8"
 	gitTreeState = "clean"
-	buildDate = "2021-10-28T15:23:07Z"
+	buildDate = "2021-11-17T22:30:06Z"
 )
vendor/k8s.io/csi-translation-lib/go.sum (generated, vendored; 2 lines changed)
@@ -210,7 +210,7 @@ k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8
 k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
 k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM=
 k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
-k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
+k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
 sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
 sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno=
 sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
vendor/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy.go (generated, vendored; 2 lines changed)
@@ -35,6 +35,7 @@ import (
 	genericfeatures "k8s.io/apiserver/pkg/features"
 	"k8s.io/apiserver/pkg/server/egressselector"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol"
 	"k8s.io/apiserver/pkg/util/x509metrics"
 	restclient "k8s.io/client-go/rest"
 	"k8s.io/client-go/transport"
@@ -175,6 +176,7 @@ func (r *proxyHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
 	handler := proxy.NewUpgradeAwareHandler(location, proxyRoundTripper, true, upgrade, &responder{w: w})
 	handler.InterceptRedirects = utilfeature.DefaultFeatureGate.Enabled(genericfeatures.StreamingProxyRedirects)
 	handler.RequireSameHostRedirects = utilfeature.DefaultFeatureGate.Enabled(genericfeatures.ValidateProxyRedirects)
+	utilflowcontrol.RequestDelegated(req.Context())
 	handler.ServeHTTP(w, newReq)
 }
vendor/k8s.io/kube-openapi/pkg/handler/handler.go (generated, vendored; 114 lines changed)
@@ -20,6 +20,7 @@ import (
 	"bytes"
 	"compress/gzip"
 	"crypto/sha512"
+	"encoding/json"
 	"fmt"
 	"mime"
 	"net/http"
@@ -30,9 +31,9 @@ import (
 	"github.com/emicklei/go-restful"
 	"github.com/golang/protobuf/proto"
 	openapi_v2 "github.com/googleapis/gnostic/openapiv2"
-	jsoniter "github.com/json-iterator/go"
 	"github.com/munnerz/goautoneg"
 	"gopkg.in/yaml.v2"
+	klog "k8s.io/klog/v2"
 	"k8s.io/kube-openapi/pkg/builder"
 	"k8s.io/kube-openapi/pkg/common"
 	"k8s.io/kube-openapi/pkg/validation/spec"
@@ -55,13 +56,40 @@ type OpenAPIService struct {

 	lastModified time.Time

-	specBytes []byte
-	specPb    []byte
-	specPbGz  []byte
-
-	specBytesETag string
-	specPbETag    string
-	specPbGzETag  string
+	jsonCache  cache
+	protoCache cache
+}
+
+type cache struct {
+	BuildCache func() ([]byte, error)
+	once       sync.Once
+	bytes      []byte
+	etag       string
+	err        error
+}
+
+func (c *cache) Get() ([]byte, string, error) {
+	c.once.Do(func() {
+		bytes, err := c.BuildCache()
+		// if there is an error updating the cache, there can be situations where
+		// c.bytes contains a valid value (carried over from the previous update)
+		// but c.err is also not nil; the cache user is expected to check for this
+		c.err = err
+		if c.err == nil {
+			// don't override previous spec if we had an error
+			c.bytes = bytes
+			c.etag = computeETag(c.bytes)
+		}
+	})
+	return c.bytes, c.etag, c.err
+}
+
+func (c *cache) New(cacheBuilder func() ([]byte, error)) cache {
+	return cache{
+		bytes:      c.bytes,
+		etag:       c.etag,
+		BuildCache: cacheBuilder,
+	}
 }

 func init() {
@@ -71,6 +99,9 @@ func init() {
 }

 func computeETag(data []byte) string {
+	if data == nil {
+		return ""
+	}
 	return fmt.Sprintf("\"%X\"", sha512.Sum512(data))
 }
@@ -83,51 +114,40 @@ func NewOpenAPIService(spec *spec.Swagger) (*OpenAPIService, error) {
 	return o, nil
 }

-func (o *OpenAPIService) getSwaggerBytes() ([]byte, string, time.Time) {
+func (o *OpenAPIService) getSwaggerBytes() ([]byte, string, time.Time, error) {
 	o.rwMutex.RLock()
 	defer o.rwMutex.RUnlock()
-	return o.specBytes, o.specBytesETag, o.lastModified
+	specBytes, etag, err := o.jsonCache.Get()
+	if err != nil {
+		return nil, "", time.Time{}, err
+	}
+	return specBytes, etag, o.lastModified, nil
 }

-func (o *OpenAPIService) getSwaggerPbBytes() ([]byte, string, time.Time) {
+func (o *OpenAPIService) getSwaggerPbBytes() ([]byte, string, time.Time, error) {
 	o.rwMutex.RLock()
 	defer o.rwMutex.RUnlock()
-	return o.specPb, o.specPbETag, o.lastModified
-}
-
-func (o *OpenAPIService) getSwaggerPbGzBytes() ([]byte, string, time.Time) {
-	o.rwMutex.RLock()
-	defer o.rwMutex.RUnlock()
-	return o.specPbGz, o.specPbGzETag, o.lastModified
+	specPb, etag, err := o.protoCache.Get()
+	if err != nil {
+		return nil, "", time.Time{}, err
+	}
+	return specPb, etag, o.lastModified, nil
 }

 func (o *OpenAPIService) UpdateSpec(openapiSpec *spec.Swagger) (err error) {
-	specBytes, err := jsoniter.ConfigCompatibleWithStandardLibrary.Marshal(openapiSpec)
-	if err != nil {
-		return err
-	}
-	specPb, err := ToProtoBinary(specBytes)
-	if err != nil {
-		return err
-	}
-	specPbGz := toGzip(specPb)
-
-	specBytesETag := computeETag(specBytes)
-	specPbETag := computeETag(specPb)
-	specPbGzETag := computeETag(specPbGz)
-
-	lastModified := time.Now()
-
 	o.rwMutex.Lock()
 	defer o.rwMutex.Unlock()
-
-	o.specBytes = specBytes
-	o.specPb = specPb
-	o.specPbGz = specPbGz
-	o.specBytesETag = specBytesETag
-	o.specPbETag = specPbETag
-	o.specPbGzETag = specPbGzETag
-	o.lastModified = lastModified
+	o.jsonCache = o.jsonCache.New(func() ([]byte, error) {
+		return json.Marshal(openapiSpec)
+	})
+	o.protoCache = o.protoCache.New(func() ([]byte, error) {
+		json, _, err := o.jsonCache.Get()
+		if err != nil {
+			return nil, err
+		}
+		return ToProtoBinary(json)
+	})
+	o.lastModified = time.Now()

 	return nil
 }
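The rewrite above replaces eager marshalling on every UpdateSpec with lazily built, sync.Once-guarded caches: the JSON and protobuf encodings are only computed when a client first asks for them, and a failed rebuild can still serve the previous bytes. A self-contained sketch of that cache shape (hypothetical names, not the vendored type):

package lazycachesketch

import (
	"crypto/sha512"
	"fmt"
	"sync"
)

// lazyCache builds its value at most once per generation; New carries the
// previous bytes forward so stale-but-valid data survives a failed rebuild.
type lazyCache struct {
	build func() ([]byte, error)
	once  sync.Once
	bytes []byte
	etag  string
	err   error
}

func (c *lazyCache) Get() ([]byte, string, error) {
	c.once.Do(func() {
		b, err := c.build()
		c.err = err
		if err == nil {
			c.bytes = b
			c.etag = fmt.Sprintf("\"%X\"", sha512.Sum512(b))
		}
	})
	return c.bytes, c.etag, c.err
}

func (c *lazyCache) New(build func() ([]byte, error)) lazyCache {
	return lazyCache{bytes: c.bytes, etag: c.etag, build: build}
}

Because Get returns both the (possibly stale) bytes and the build error, the HTTP handler can decide to serve old data or a 503, exactly as the patched handler below does.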
@@ -206,7 +226,7 @@ func (o *OpenAPIService) RegisterOpenAPIVersionedService(servePath string, handl
 			accepted := []struct {
 				Type           string
 				SubType        string
-				GetDataAndETag func() ([]byte, string, time.Time)
+				GetDataAndETag func() ([]byte, string, time.Time, error)
 			}{
 				{"application", "json", o.getSwaggerBytes},
 				{"application", "com.github.proto-openapi.spec.v2@v1.0+protobuf", o.getSwaggerPbBytes},
@@ -230,7 +250,15 @@ func (o *OpenAPIService) RegisterOpenAPIVersionedService(servePath string, handl
 				}

 				// serve the first matching media type in the sorted clause list
-				data, etag, lastModified := accepts.GetDataAndETag()
+				data, etag, lastModified, err := accepts.GetDataAndETag()
+				if err != nil {
+					klog.Errorf("Error in OpenAPI handler: %s", err)
+					// only return a 503 if we have no older cache data to serve
+					if data == nil {
+						w.WriteHeader(http.StatusServiceUnavailable)
+						return
+					}
+				}
 				w.Header().Set("Etag", etag)
 				// ServeContent will take care of caching using eTag.
 				http.ServeContent(w, r, servePath, lastModified, bytes.NewReader(data))
vendor/k8s.io/kubectl/pkg/cmd/cp/cp.go (generated, vendored; 159 lines changed)
@@ -24,11 +24,8 @@ import (
 	"io"
 	"io/ioutil"
 	"os"
-	"path"
-	"path/filepath"
 	"strings"

-	"github.com/lithammer/dedent"
 	"github.com/spf13/cobra"

 	"k8s.io/cli-runtime/pkg/genericclioptions"
@@ -67,12 +64,6 @@ var (

 	# Copy /tmp/foo from a remote pod to /tmp/bar locally
 	kubectl cp <some-namespace>/<some-pod>:/tmp/foo /tmp/bar`))
-
-	cpUsageStr = dedent.Dedent(`
-		expected 'cp <file-spec-src> <file-spec-dest> [-c container]'.
-		<file-spec> is:
-		[namespace/]pod-name:/file/path for a remote file
-		/file/path for a local file`)
 )

 // CopyOptions have the data required to perform the copy operation
@@ -158,6 +149,7 @@ func NewCmdCp(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.C
 		},
 		Run: func(cmd *cobra.Command, args []string) {
 			cmdutil.CheckErr(o.Complete(f, cmd))
+			cmdutil.CheckErr(o.Validate(cmd, args))
 			cmdutil.CheckErr(o.Run(args))
 		},
 	}
@@ -167,27 +159,22 @@ func NewCmdCp(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.C
 	return cmd
 }

-type fileSpec struct {
-	PodNamespace string
-	PodName      string
-	File         string
-}
-
 var (
 	errFileSpecDoesntMatchFormat = errors.New("filespec must match the canonical format: [[namespace/]pod:]file/path")
-	errFileCannotBeEmpty         = errors.New("filepath can not be empty")
 )

 func extractFileSpec(arg string) (fileSpec, error) {
 	i := strings.Index(arg, ":")

-	if i == -1 {
-		return fileSpec{File: arg}, nil
-	}
 	// filespec starting with a colon is invalid
 	if i == 0 {
 		return fileSpec{}, errFileSpecDoesntMatchFormat
 	}
+	if i == -1 {
+		return fileSpec{
+			File: newLocalPath(arg),
+		}, nil
+	}

 	pod, file := arg[:i], arg[i+1:]
 	pieces := strings.Split(pod, "/")
|
||||
case 1:
|
||||
return fileSpec{
|
||||
PodName: pieces[0],
|
||||
File: file,
|
||||
File: newRemotePath(file),
|
||||
}, nil
|
||||
case 2:
|
||||
return fileSpec{
|
||||
PodNamespace: pieces[0],
|
||||
PodName: pieces[1],
|
||||
File: file,
|
||||
File: newRemotePath(file),
|
||||
}, nil
|
||||
default:
|
||||
return fileSpec{}, errFileSpecDoesntMatchFormat
|
||||
@@ -235,16 +222,13 @@ func (o *CopyOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error {
 // Validate makes sure provided values for CopyOptions are valid
 func (o *CopyOptions) Validate(cmd *cobra.Command, args []string) error {
 	if len(args) != 2 {
-		return cmdutil.UsageErrorf(cmd, cpUsageStr)
+		return fmt.Errorf("source and destination are required")
 	}
 	return nil
 }

 // Run performs the execution
 func (o *CopyOptions) Run(args []string) error {
-	if len(args) < 2 {
-		return fmt.Errorf("source and destination are required")
-	}
 	srcSpec, err := extractFileSpec(args[0])
 	if err != nil {
 		return err
@@ -257,6 +241,9 @@ func (o *CopyOptions) Run(args []string) error {
 	if len(srcSpec.PodName) != 0 && len(destSpec.PodName) != 0 {
 		return fmt.Errorf("one of src or dest must be a local file specification")
 	}
+	if len(srcSpec.File.String()) == 0 || len(destSpec.File.String()) == 0 {
+		return errors.New("filepath can not be empty")
+	}

 	if len(srcSpec.PodName) != 0 {
 		return o.copyFromPod(srcSpec, destSpec)
@@ -283,7 +270,7 @@ func (o *CopyOptions) checkDestinationIsDir(dest fileSpec) error {
 			PodName:   dest.PodName,
 		},

-		Command:  []string{"test", "-d", dest.File},
+		Command:  []string{"test", "-d", dest.File.String()},
 		Executor: &exec.DefaultRemoteExecutor{},
 	}

@@ -291,29 +278,24 @@
 }

 func (o *CopyOptions) copyToPod(src, dest fileSpec, options *exec.ExecOptions) error {
-	if len(src.File) == 0 || len(dest.File) == 0 {
-		return errFileCannotBeEmpty
-	}
-	if _, err := os.Stat(src.File); err != nil {
+	if _, err := os.Stat(src.File.String()); err != nil {
 		return fmt.Errorf("%s doesn't exist in local filesystem", src.File)
 	}
 	reader, writer := io.Pipe()

-	// strip trailing slash (if any)
-	if dest.File != "/" && strings.HasSuffix(string(dest.File[len(dest.File)-1]), "/") {
-		dest.File = dest.File[:len(dest.File)-1]
-	}
+	srcFile := src.File.(localPath)
+	destFile := dest.File.(remotePath)

 	if err := o.checkDestinationIsDir(dest); err == nil {
 		// If no error, dest.File was found to be a directory.
 		// Copy specified src into it
-		dest.File = dest.File + "/" + path.Base(src.File)
+		destFile = destFile.Join(srcFile.Base())
 	}

-	go func() {
+	go func(src localPath, dest remotePath, writer io.WriteCloser) {
 		defer writer.Close()
-		cmdutil.CheckErr(makeTar(src.File, dest.File, writer))
-	}()
+		cmdutil.CheckErr(makeTar(src, dest, writer))
+	}(srcFile, destFile, writer)
 	var cmdArr []string

 	// TODO: Improve error messages by first testing if 'tar' is present in the container?
@@ -322,9 +304,9 @@ func (o *CopyOptions) copyToPod(src, dest fileSpec, options *exec.ExecOptions) e
 	} else {
 		cmdArr = []string{"tar", "-xmf", "-"}
 	}
-	destDir := path.Dir(dest.File)
-	if len(destDir) > 0 {
-		cmdArr = append(cmdArr, "-C", destDir)
+	destFileDir := destFile.Dir().String()
+	if len(destFileDir) > 0 {
+		cmdArr = append(cmdArr, "-C", destFileDir)
 	}

 	options.StreamOptions = exec.StreamOptions{
@@ -345,10 +327,6 @@ func (o *CopyOptions) copyToPod(src, dest fileSpec, options *exec.ExecOptions) e
 }

 func (o *CopyOptions) copyFromPod(src, dest fileSpec) error {
-	if len(src.File) == 0 || len(dest.File) == 0 {
-		return errFileCannotBeEmpty
-	}
-
 	reader, outStream := io.Pipe()
 	options := &exec.ExecOptions{
 		StreamOptions: exec.StreamOptions{
@@ -363,7 +341,7 @@ func (o *CopyOptions) copyFromPod(src, dest fileSpec) error {
 		},

 		// TODO: Improve error messages by first testing if 'tar' is present in the container?
-		Command:  []string{"tar", "cf", "-", src.File},
+		Command:  []string{"tar", "cf", "-", src.File.String()},
 		Executor: &exec.DefaultRemoteExecutor{},
 	}

@@ -371,49 +349,28 @@
 		defer outStream.Close()
 		cmdutil.CheckErr(o.execute(options))
 	}()
-	prefix := getPrefix(src.File)
-	prefix = path.Clean(prefix)
+
+	srcFile := src.File.(remotePath)
+	destFile := dest.File.(localPath)
+
 	// remove extraneous path shortcuts - these could occur if a path contained extra "../"
 	// and attempted to navigate beyond "/" in a remote filesystem
-	prefix = stripPathShortcuts(prefix)
-	return o.untarAll(src, reader, dest.File, prefix)
-}
-
-// stripPathShortcuts removes any leading or trailing "../" from a given path
-func stripPathShortcuts(p string) string {
-	newPath := path.Clean(p)
-	trimmed := strings.TrimPrefix(newPath, "../")
-
-	for trimmed != newPath {
-		newPath = trimmed
-		trimmed = strings.TrimPrefix(newPath, "../")
-	}
-
-	// trim leftover {".", ".."}
-	if newPath == "." || newPath == ".." {
-		newPath = ""
-	}
-
-	if len(newPath) > 0 && string(newPath[0]) == "/" {
-		return newPath[1:]
-	}
-
-	return newPath
-}
-
-func makeTar(srcPath, destPath string, writer io.Writer) error {
+	prefix := stripPathShortcuts(srcFile.StripSlashes().Clean().String())
+	return o.untarAll(src.PodNamespace, src.PodName, prefix, srcFile, destFile, reader)
+}
+
+func makeTar(src localPath, dest remotePath, writer io.Writer) error {
 	// TODO: use compression here?
 	tarWriter := tar.NewWriter(writer)
 	defer tarWriter.Close()

-	srcPath = path.Clean(srcPath)
-	destPath = path.Clean(destPath)
-	return recursiveTar(path.Dir(srcPath), path.Base(srcPath), path.Dir(destPath), path.Base(destPath), tarWriter)
+	srcPath := src.Clean()
+	destPath := dest.Clean()
+	return recursiveTar(srcPath.Dir(), srcPath.Base(), destPath.Dir(), destPath.Base(), tarWriter)
 }

-func recursiveTar(srcBase, srcFile, destBase, destFile string, tw *tar.Writer) error {
-	srcPath := path.Join(srcBase, srcFile)
-	matchedPaths, err := filepath.Glob(srcPath)
+func recursiveTar(srcDir, srcFile localPath, destDir, destFile remotePath, tw *tar.Writer) error {
+	matchedPaths, err := srcDir.Join(srcFile).Glob()
 	if err != nil {
 		return err
 	}
@@ -430,13 +387,14 @@ func recursiveTar(srcBase, srcFile, destBase, destFile string, tw *tar.Writer) e
 		if len(files) == 0 {
 			//case empty directory
 			hdr, _ := tar.FileInfoHeader(stat, fpath)
-			hdr.Name = destFile
+			hdr.Name = destFile.String()
 			if err := tw.WriteHeader(hdr); err != nil {
 				return err
 			}
 		}
 		for _, f := range files {
-			if err := recursiveTar(srcBase, path.Join(srcFile, f.Name()), destBase, path.Join(destFile, f.Name()), tw); err != nil {
+			if err := recursiveTar(srcDir, srcFile.Join(newLocalPath(f.Name())),
+				destDir, destFile.Join(newRemotePath(f.Name())), tw); err != nil {
 				return err
 			}
 		}
@@ -450,7 +408,7 @@ func recursiveTar(srcBase, srcFile, destBase, destFile string, tw *tar.Writer) e
 		}

 		hdr.Linkname = target
-		hdr.Name = destFile
+		hdr.Name = destFile.String()
 		if err := tw.WriteHeader(hdr); err != nil {
 			return err
 		}
@@ -460,7 +418,7 @@ func recursiveTar(srcBase, srcFile, destBase, destFile string, tw *tar.Writer) e
 		if err != nil {
 			return err
 		}
-		hdr.Name = destFile
+		hdr.Name = destFile.String()

 		if err := tw.WriteHeader(hdr); err != nil {
 			return err
@@ -481,7 +439,7 @@ func recursiveTar(srcBase, srcFile, destBase, destFile string, tw *tar.Writer) e
 	return nil
 }

-func (o *CopyOptions) untarAll(src fileSpec, reader io.Reader, destDir, prefix string) error {
+func (o *CopyOptions) untarAll(ns, pod string, prefix string, src remotePath, dest localPath, reader io.Reader) error {
 	symlinkWarningPrinted := false
 	// TODO: use compression here?
 	tarReader := tar.NewReader(reader)
@@ -505,19 +463,21 @@

 		// basic file information
 		mode := header.FileInfo().Mode()
-		destFileName := filepath.Join(destDir, header.Name[len(prefix):])
+		// header.Name is a name of the REMOTE file, so we need to create
+		// a remotePath so that it goes through appropriate processing related
+		// with cleaning remote paths
+		destFileName := dest.Join(newRemotePath(header.Name[len(prefix):]))

-		if !isDestRelative(destDir, destFileName) {
+		if !isRelative(dest, destFileName) {
 			fmt.Fprintf(o.IOStreams.ErrOut, "warning: file %q is outside target destination, skipping\n", destFileName)
 			continue
 		}

-		baseName := filepath.Dir(destFileName)
-		if err := os.MkdirAll(baseName, 0755); err != nil {
+		if err := os.MkdirAll(destFileName.Dir().String(), 0755); err != nil {
 			return err
 		}
 		if header.FileInfo().IsDir() {
-			if err := os.MkdirAll(destFileName, 0755); err != nil {
+			if err := os.MkdirAll(destFileName.String(), 0755); err != nil {
 				return err
 			}
 			continue
@@ -525,14 +485,16 @@

 		if mode&os.ModeSymlink != 0 {
 			if !symlinkWarningPrinted && len(o.ExecParentCmdName) > 0 {
-				fmt.Fprintf(o.IOStreams.ErrOut, "warning: skipping symlink: %q -> %q (consider using \"%s exec -n %q %q -- tar cf - %q | tar xf -\")\n", destFileName, header.Linkname, o.ExecParentCmdName, src.PodNamespace, src.PodName, src.File)
+				fmt.Fprintf(o.IOStreams.ErrOut,
+					"warning: skipping symlink: %q -> %q (consider using \"%s exec -n %q %q -- tar cf - %q | tar xf -\")\n",
+					destFileName, header.Linkname, o.ExecParentCmdName, ns, pod, src)
 				symlinkWarningPrinted = true
 				continue
 			}
 			fmt.Fprintf(o.IOStreams.ErrOut, "warning: skipping symlink: %q -> %q\n", destFileName, header.Linkname)
 			continue
 		}
-		outFile, err := os.Create(destFileName)
+		outFile, err := os.Create(destFileName.String())
 		if err != nil {
 			return err
 		}
@@ -548,21 +510,6 @@
 	return nil
 }

-// isDestRelative returns true if dest is pointing outside the base directory,
-// false otherwise.
-func isDestRelative(base, dest string) bool {
-	relative, err := filepath.Rel(base, dest)
-	if err != nil {
-		return false
-	}
-	return relative == "." || relative == stripPathShortcuts(relative)
-}
-
-func getPrefix(file string) string {
-	// tar strips the leading '/' if it's there, so we will too
-	return strings.TrimLeft(file, "/")
-}
-
 func (o *CopyOptions) execute(options *exec.ExecOptions) error {
 	if len(options.Namespace) == 0 {
 		options.Namespace = o.Namespace
vendor/k8s.io/kubectl/pkg/cmd/cp/filespec.go (generated, vendored; new file, 161 lines)
@ -0,0 +1,161 @@
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cp

import (
	"path"
	"path/filepath"
	"strings"
)

type fileSpec struct {
	PodName      string
	PodNamespace string
	File         pathSpec
}

type pathSpec interface {
	String() string
}

// localPath represents a client-native path, which will differ based
// on the client OS, its methods will use path/filepath package which
// is OS dependant
type localPath struct {
	file string
}

func newLocalPath(fileName string) localPath {
	file := stripTrailingSlash(fileName)
	return localPath{file: file}
}

func (p localPath) String() string {
	return p.file
}

func (p localPath) Dir() localPath {
	return newLocalPath(filepath.Dir(p.file))
}

func (p localPath) Base() localPath {
	return newLocalPath(filepath.Base(p.file))
}

func (p localPath) Clean() localPath {
	return newLocalPath(filepath.Clean(p.file))
}

func (p localPath) Join(elem pathSpec) localPath {
	return newLocalPath(filepath.Join(p.file, elem.String()))
}

func (p localPath) Glob() (matches []string, err error) {
	return filepath.Glob(p.file)
}

func (p localPath) StripSlashes() localPath {
	return newLocalPath(stripLeadingSlash(p.file))
}

func isRelative(base, target localPath) bool {
	relative, err := filepath.Rel(base.String(), target.String())
	if err != nil {
		return false
	}
	return relative == "." || relative == stripPathShortcuts(relative)
}

// remotePath represents always UNIX path, its methods will use path
// package which is always using `/`
type remotePath struct {
	file string
}

func newRemotePath(fileName string) remotePath {
	// we assume remote file is a linux container but we need to convert
	// windows path separators to unix style for consistent processing
	file := strings.ReplaceAll(stripTrailingSlash(fileName), `\`, "/")
	return remotePath{file: file}
}

func (p remotePath) String() string {
	return p.file
}

func (p remotePath) Dir() remotePath {
	return newRemotePath(path.Dir(p.file))
}

func (p remotePath) Base() remotePath {
	return newRemotePath(path.Base(p.file))
}

func (p remotePath) Clean() remotePath {
	return newRemotePath(path.Clean(p.file))
}

func (p remotePath) Join(elem pathSpec) remotePath {
	return newRemotePath(path.Join(p.file, elem.String()))
}

func (p remotePath) StripShortcuts() remotePath {
	p = p.Clean()
	return newRemotePath(stripPathShortcuts(p.file))
}

func (p remotePath) StripSlashes() remotePath {
	return newRemotePath(stripLeadingSlash(p.file))
}

// strips trailing slash (if any) both unix and windows style
func stripTrailingSlash(file string) string {
	if len(file) == 0 {
		return file
	}
	if file != "/" && strings.HasSuffix(string(file[len(file)-1]), "/") {
		return file[:len(file)-1]
	}
	return file
}

func stripLeadingSlash(file string) string {
	// tar strips the leading '/' and '\' if it's there, so we will too
	return strings.TrimLeft(file, `/\`)
}

// stripPathShortcuts removes any leading or trailing "../" from a given path
func stripPathShortcuts(p string) string {
	newPath := p
	trimmed := strings.TrimPrefix(newPath, "../")

	for trimmed != newPath {
		newPath = trimmed
		trimmed = strings.TrimPrefix(newPath, "../")
	}

	// trim leftover {".", ".."}
	if newPath == "." || newPath == ".." {
		newPath = ""
	}

	if len(newPath) > 0 && string(newPath[0]) == "/" {
		return newPath[1:]
	}

	return newPath
}
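The helpers above are unexported, but their behavior is easy to probe. A minimal standalone sketch (a hypothetical re-creation, not part of the vendored package) mirrors `stripPathShortcuts` to show how leading `../` runs, a bare `.` or `..`, and a leading slash are removed:

```go
package main

import (
	"fmt"
	"strings"
)

// stripPathShortcuts mirrors the vendored helper: it trims leading "../"
// runs, collapses a bare "." or ".." to "", and drops a leading "/".
func stripPathShortcuts(p string) string {
	newPath := p
	trimmed := strings.TrimPrefix(newPath, "../")
	for trimmed != newPath {
		newPath = trimmed
		trimmed = strings.TrimPrefix(newPath, "../")
	}
	if newPath == "." || newPath == ".." {
		newPath = ""
	}
	if len(newPath) > 0 && string(newPath[0]) == "/" {
		return newPath[1:]
	}
	return newPath
}

func main() {
	for _, p := range []string{"../../etc/passwd", "..", "/var/log", "dir/file"} {
		fmt.Printf("%-20q -> %q\n", p, stripPathShortcuts(p))
	}
	// "../../etc/passwd"  -> "etc/passwd"
	// ".."                -> ""
	// "/var/log"          -> "var/log"
	// "dir/file"          -> "dir/file"
}
```

This is what makes `isRelative` safe for `kubectl cp`: a target path is treated as relative to the base only if it equals its shortcut-stripped form, so `../` escapes are rejected.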
4 vendor/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go generated vendored
@ -198,13 +198,13 @@ func (o *CreateSecretDockerRegistryOptions) Complete(f cmdutil.Factory, cmd *cob
	o.Namespace, o.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace()
	if err != nil {
		return nil
		return err
	}

	cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.DryRunStrategy)
	printer, err := o.PrintFlags.ToPrinter()
	if err != nil {
		return nil
		return err
	}

	o.PrintObj = func(obj runtime.Object) error {
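Both changed lines in this hunk fix the same swallowed-error bug: the old code returned `nil` from an error branch, so `Complete` reported success on a half-initialized options struct. A minimal sketch of the anti-pattern and the fix (hypothetical names, not the vendored API):

```go
package main

import (
	"errors"
	"fmt"
)

func load() (string, error) { return "", errors.New("no kubeconfig namespace") }

// completeBuggy mirrors the old code path: the error branch returns nil,
// so the caller believes Complete succeeded.
func completeBuggy() error {
	if _, err := load(); err != nil {
		return nil // bug: error swallowed
	}
	return nil
}

// completeFixed mirrors the patched code path: the error is propagated.
func completeFixed() error {
	if _, err := load(); err != nil {
		return err
	}
	return nil
}

func main() {
	fmt.Println("buggy:", completeBuggy()) // buggy: <nil>
	fmt.Println("fixed:", completeFixed()) // fixed: no kubeconfig namespace
}
```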
23 vendor/k8s.io/kubernetes/cmd/kube-scheduler/app/options/deprecated.go generated vendored
@ -21,6 +21,7 @@ import (
	"github.com/spf13/pflag"

	"k8s.io/apimachinery/pkg/util/validation/field"
	componentbaseconfig "k8s.io/component-base/config"
	schedulerappconfig "k8s.io/kubernetes/cmd/kube-scheduler/app/config"
	kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
)
@ -28,6 +29,10 @@ import (
// DeprecatedOptions contains deprecated options and their flags.
// TODO remove these fields once the deprecated flags are removed.
type DeprecatedOptions struct {
	componentbaseconfig.DebuggingConfiguration
	componentbaseconfig.ClientConnectionConfiguration
	// Note that only the deprecated options (lock-object-name and lock-object-namespace) are populated here.
	componentbaseconfig.LeaderElectionConfiguration
	// The fields below here are placeholders for flags that can't be directly
	// mapped into componentconfig.KubeSchedulerConfiguration.
	PolicyConfigFile string
@ -37,7 +42,7 @@ type DeprecatedOptions struct {
}

// AddFlags adds flags for the deprecated options.
func (o *DeprecatedOptions) AddFlags(fs *pflag.FlagSet, cfg *kubeschedulerconfig.KubeSchedulerConfiguration) {
func (o *DeprecatedOptions) AddFlags(fs *pflag.FlagSet) {
	if o == nil {
		return
	}
@ -48,14 +53,14 @@ func (o *DeprecatedOptions) AddFlags(fs *pflag.FlagSet, cfg *kubeschedulerconfig
	fs.StringVar(&o.PolicyConfigMapNamespace, "policy-configmap-namespace", o.PolicyConfigMapNamespace, "DEPRECATED: the namespace where policy ConfigMap is located. The kube-system namespace will be used if this is not provided or is empty. Note: The predicates/priorities defined in this file will take precedence over any profiles define in ComponentConfig.")
	fs.BoolVar(&o.UseLegacyPolicyConfig, "use-legacy-policy-config", o.UseLegacyPolicyConfig, "DEPRECATED: when set to true, scheduler will ignore policy ConfigMap and uses policy config file. Note: The scheduler will fail if this is combined with Plugin configs")

	fs.BoolVar(&cfg.EnableProfiling, "profiling", cfg.EnableProfiling, "DEPRECATED: enable profiling via web interface host:port/debug/pprof/. This parameter is ignored if a config file is specified in --config.")
	fs.BoolVar(&cfg.EnableContentionProfiling, "contention-profiling", cfg.EnableContentionProfiling, "DEPRECATED: enable lock contention profiling, if profiling is enabled. This parameter is ignored if a config file is specified in --config.")
	fs.StringVar(&cfg.ClientConnection.Kubeconfig, "kubeconfig", cfg.ClientConnection.Kubeconfig, "DEPRECATED: path to kubeconfig file with authorization and master location information. This parameter is ignored if a config file is specified in --config.")
	fs.StringVar(&cfg.ClientConnection.ContentType, "kube-api-content-type", cfg.ClientConnection.ContentType, "DEPRECATED: content type of requests sent to apiserver. This parameter is ignored if a config file is specified in --config.")
	fs.Float32Var(&cfg.ClientConnection.QPS, "kube-api-qps", cfg.ClientConnection.QPS, "DEPRECATED: QPS to use while talking with kubernetes apiserver. This parameter is ignored if a config file is specified in --config.")
	fs.Int32Var(&cfg.ClientConnection.Burst, "kube-api-burst", cfg.ClientConnection.Burst, "DEPRECATED: burst to use while talking with kubernetes apiserver. This parameter is ignored if a config file is specified in --config.")
	fs.StringVar(&cfg.LeaderElection.ResourceNamespace, "lock-object-namespace", cfg.LeaderElection.ResourceNamespace, "DEPRECATED: define the namespace of the lock object. Will be removed in favor of leader-elect-resource-namespace. This parameter is ignored if a config file is specified in --config.")
	fs.StringVar(&cfg.LeaderElection.ResourceName, "lock-object-name", cfg.LeaderElection.ResourceName, "DEPRECATED: define the name of the lock object. Will be removed in favor of leader-elect-resource-name. This parameter is ignored if a config file is specified in --config.")
	fs.BoolVar(&o.EnableProfiling, "profiling", true, "DEPRECATED: enable profiling via web interface host:port/debug/pprof/. This parameter is ignored if a config file is specified in --config.")
	fs.BoolVar(&o.EnableContentionProfiling, "contention-profiling", true, "DEPRECATED: enable lock contention profiling, if profiling is enabled. This parameter is ignored if a config file is specified in --config.")
	fs.StringVar(&o.Kubeconfig, "kubeconfig", "", "DEPRECATED: path to kubeconfig file with authorization and master location information. This parameter is ignored if a config file is specified in --config.")
	fs.StringVar(&o.ContentType, "kube-api-content-type", "application/vnd.kubernetes.protobuf", "DEPRECATED: content type of requests sent to apiserver. This parameter is ignored if a config file is specified in --config.")
	fs.Float32Var(&o.QPS, "kube-api-qps", 50.0, "DEPRECATED: QPS to use while talking with kubernetes apiserver. This parameter is ignored if a config file is specified in --config.")
	fs.Int32Var(&o.Burst, "kube-api-burst", 100, "DEPRECATED: burst to use while talking with kubernetes apiserver. This parameter is ignored if a config file is specified in --config.")
	fs.StringVar(&o.ResourceNamespace, "lock-object-namespace", "kube-system", "DEPRECATED: define the namespace of the lock object. Will be removed in favor of leader-elect-resource-namespace. This parameter is ignored if a config file is specified in --config.")
	fs.StringVar(&o.ResourceName, "lock-object-name", "kube-scheduler", "DEPRECATED: define the name of the lock object. Will be removed in favor of leader-elect-resource-name. This parameter is ignored if a config file is specified in --config.")
}

// Validate validates the deprecated scheduler options.
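The shape of the change: deprecated flags now bind to `DeprecatedOptions` fields with literal defaults, because the component config no longer exists when flags are registered; the config is only overridden later, for flags the user actually set. A hedged sketch of that pattern using the same `pflag` library (hypothetical struct, not the vendored type):

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

type deprecatedOptions struct {
	EnableProfiling bool
}

func main() {
	o := &deprecatedOptions{}
	fs := pflag.NewFlagSet("deprecated", pflag.ContinueOnError)
	// As in the patched AddFlags, the default is a literal because the
	// component config is not available at flag-registration time.
	fs.BoolVar(&o.EnableProfiling, "profiling", true, "DEPRECATED: enable profiling")

	_ = fs.Parse([]string{"--profiling=false"})

	// Changed reports whether the user set the flag explicitly, which is
	// what ApplyDeprecated later uses to decide whether to override cfg.
	fmt.Println(fs.Changed("profiling"), o.EnableProfiling) // true false
}
```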
4 vendor/k8s.io/kubernetes/cmd/kube-scheduler/app/options/insecure_serving.go generated vendored
@ -44,10 +44,10 @@ func (o *CombinedInsecureServingOptions) AddFlags(fs *pflag.FlagSet) {
		return
	}

	fs.StringVar(&o.BindAddress, "address", o.BindAddress, "DEPRECATED: the IP address on which to listen for the --port port (set to 0.0.0.0 or :: for listening in all interfaces and IP families). See --bind-address instead. This parameter is ignored if a config file is specified in --config.")
	fs.StringVar(&o.BindAddress, "address", "0.0.0.0", "DEPRECATED: the IP address on which to listen for the --port port (set to 0.0.0.0 or :: for listening in all interfaces and IP families). See --bind-address instead. This parameter is ignored if a config file is specified in --config.")
	// MarkDeprecated hides the flag from the help. We don't want that:
	// fs.MarkDeprecated("address", "see --bind-address instead.")
	fs.IntVar(&o.BindPort, "port", o.BindPort, "DEPRECATED: the port on which to serve HTTP insecurely without authentication and authorization. If 0, don't serve plain HTTP at all. See --secure-port instead. This parameter is ignored if a config file is specified in --config.")
	fs.IntVar(&o.BindPort, "port", kubeschedulerconfig.DefaultInsecureSchedulerPort, "DEPRECATED: the port on which to serve HTTP insecurely without authentication and authorization. If 0, don't serve plain HTTP at all. See --secure-port instead. This parameter is ignored if a config file is specified in --config.")
	// MarkDeprecated hides the flag from the help. We don't want that:
	// fs.MarkDeprecated("port", "see --secure-port instead.")
}
118 vendor/k8s.io/kubernetes/cmd/kube-scheduler/app/options/options.go generated vendored
@ -43,14 +43,13 @@ import (
	schedulerappconfig "k8s.io/kubernetes/cmd/kube-scheduler/app/config"
	"k8s.io/kubernetes/pkg/scheduler"
	kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
	"k8s.io/kubernetes/pkg/scheduler/apis/config/latest"
	"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
)

// Options has all the params needed to run a Scheduler
type Options struct {
	// The default values. These are overridden if ConfigFile is set or by values in InsecureServing.
	ComponentConfig kubeschedulerconfig.KubeSchedulerConfiguration
	ComponentConfig *kubeschedulerconfig.KubeSchedulerConfiguration

	SecureServing           *apiserveroptions.SecureServingOptionsWithLoopback
	CombinedInsecureServing *CombinedInsecureServingOptions
@ -59,6 +58,7 @@ type Options struct {
	Metrics *metrics.Options
	Logs    *logs.Options
	Deprecated *DeprecatedOptions
	LeaderElection *componentbaseconfig.LeaderElectionConfiguration

	// ConfigFile is the location of the scheduler server's configuration file.
	ConfigFile string
@ -67,10 +67,13 @@ type Options struct {
	WriteConfigTo string

	Master string

	// Flags hold the parsed CLI flags.
	Flags *cliflag.NamedFlagSets
}

// NewOptions returns default scheduler app options.
func NewOptions() (*Options, error) {
func NewOptions() *Options {
	o := &Options{
		SecureServing: apiserveroptions.NewSecureServingOptions().WithLoopback(),
		CombinedInsecureServing: &CombinedInsecureServingOptions{
@ -86,6 +89,15 @@ func NewOptions() (*Options, error) {
			UseLegacyPolicyConfig:    false,
			PolicyConfigMapNamespace: metav1.NamespaceSystem,
		},
		LeaderElection: &componentbaseconfig.LeaderElectionConfiguration{
			LeaderElect:       true,
			LeaseDuration:     metav1.Duration{Duration: 15 * time.Second},
			RenewDeadline:     metav1.Duration{Duration: 10 * time.Second},
			RetryPeriod:       metav1.Duration{Duration: 2 * time.Second},
			ResourceLock:      "leases",
			ResourceName:      "kube-scheduler",
			ResourceNamespace: "kube-system",
		},
		Metrics: metrics.NewOptions(),
		Logs:    logs.NewOptions(),
	}
@ -99,82 +111,76 @@ func NewOptions() (*Options, error) {
	o.SecureServing.ServerCert.PairName = "kube-scheduler"
	o.SecureServing.BindPort = kubeschedulerconfig.DefaultKubeSchedulerPort

	return o, nil
	o.initFlags()

	return o
}

// Complete completes the remaining instantiation of the options obj.
// In particular, it injects the latest internal versioned ComponentConfig.
func (o *Options) Complete(nfs *cliflag.NamedFlagSets) error {
	cfg, err := latest.Default()
	if err != nil {
		return err
// ApplyDeprecated obtains the deprecated CLI args and set them to `o.ComponentConfig` if specified.
func (o *Options) ApplyDeprecated() {
	if o.Flags == nil {
		return
	}

	hhost, hport, err := splitHostIntPort(cfg.HealthzBindAddress)
	if err != nil {
		return err
	}
	// Obtain CLI args related with insecure serving.
	// If not specified in command line, derive the default settings from cfg.
	insecureServing := nfs.FlagSet("insecure serving")
	if !insecureServing.Changed("address") {
		o.CombinedInsecureServing.BindAddress = hhost
	}
	if !insecureServing.Changed("port") {
		o.CombinedInsecureServing.BindPort = hport
	}
	// Obtain deprecated CLI args. Set them to cfg if specified in command line.
	deprecated := nfs.FlagSet("deprecated")
	deprecated := o.Flags.FlagSet("deprecated")
	if deprecated.Changed("profiling") {
		cfg.EnableProfiling = o.ComponentConfig.EnableProfiling
		o.ComponentConfig.EnableProfiling = o.Deprecated.EnableProfiling
	}
	if deprecated.Changed("contention-profiling") {
		cfg.EnableContentionProfiling = o.ComponentConfig.EnableContentionProfiling
		o.ComponentConfig.EnableContentionProfiling = o.Deprecated.EnableContentionProfiling
	}
	if deprecated.Changed("kubeconfig") {
		cfg.ClientConnection.Kubeconfig = o.ComponentConfig.ClientConnection.Kubeconfig
		o.ComponentConfig.ClientConnection.Kubeconfig = o.Deprecated.Kubeconfig
	}
	if deprecated.Changed("kube-api-content-type") {
		cfg.ClientConnection.ContentType = o.ComponentConfig.ClientConnection.ContentType
		o.ComponentConfig.ClientConnection.ContentType = o.Deprecated.ContentType
	}
	if deprecated.Changed("kube-api-qps") {
		cfg.ClientConnection.QPS = o.ComponentConfig.ClientConnection.QPS
		o.ComponentConfig.ClientConnection.QPS = o.Deprecated.QPS
	}
	if deprecated.Changed("kube-api-burst") {
		cfg.ClientConnection.Burst = o.ComponentConfig.ClientConnection.Burst
		o.ComponentConfig.ClientConnection.Burst = o.Deprecated.Burst
	}
	if deprecated.Changed("lock-object-namespace") {
		cfg.LeaderElection.ResourceNamespace = o.ComponentConfig.LeaderElection.ResourceNamespace
		o.ComponentConfig.LeaderElection.ResourceNamespace = o.Deprecated.ResourceNamespace
	}
	if deprecated.Changed("lock-object-name") {
		cfg.LeaderElection.ResourceName = o.ComponentConfig.LeaderElection.ResourceName
		o.ComponentConfig.LeaderElection.ResourceName = o.Deprecated.ResourceName
	}
	// Obtain CLI args related with leaderelection. Set them to cfg if specified in command line.
	leaderelection := nfs.FlagSet("leader election")
}

// ApplyLeaderElectionTo obtains the CLI args related with leaderelection, and override the values in `cfg`.
// Then the `cfg` object is injected into the `options` object.
func (o *Options) ApplyLeaderElectionTo(cfg *kubeschedulerconfig.KubeSchedulerConfiguration) {
	if o.Flags == nil {
		return
	}
	// Obtain CLI args related with leaderelection. Set them to `cfg` if specified in command line.
	leaderelection := o.Flags.FlagSet("leader election")
	if leaderelection.Changed("leader-elect") {
		cfg.LeaderElection.LeaderElect = o.ComponentConfig.LeaderElection.LeaderElect
		cfg.LeaderElection.LeaderElect = o.LeaderElection.LeaderElect
	}
	if leaderelection.Changed("leader-elect-lease-duration") {
		cfg.LeaderElection.LeaseDuration = o.ComponentConfig.LeaderElection.LeaseDuration
		cfg.LeaderElection.LeaseDuration = o.LeaderElection.LeaseDuration
	}
	if leaderelection.Changed("leader-elect-renew-deadline") {
		cfg.LeaderElection.RenewDeadline = o.ComponentConfig.LeaderElection.RenewDeadline
		cfg.LeaderElection.RenewDeadline = o.LeaderElection.RenewDeadline
	}
	if leaderelection.Changed("leader-elect-retry-period") {
		cfg.LeaderElection.RetryPeriod = o.ComponentConfig.LeaderElection.RetryPeriod
		cfg.LeaderElection.RetryPeriod = o.LeaderElection.RetryPeriod
	}
	if leaderelection.Changed("leader-elect-resource-lock") {
		cfg.LeaderElection.ResourceLock = o.ComponentConfig.LeaderElection.ResourceLock
		cfg.LeaderElection.ResourceLock = o.LeaderElection.ResourceLock
	}
	if leaderelection.Changed("leader-elect-resource-name") {
		cfg.LeaderElection.ResourceName = o.ComponentConfig.LeaderElection.ResourceName
		cfg.LeaderElection.ResourceName = o.LeaderElection.ResourceName
	}
	if leaderelection.Changed("leader-elect-resource-namespace") {
		cfg.LeaderElection.ResourceNamespace = o.ComponentConfig.LeaderElection.ResourceNamespace
		cfg.LeaderElection.ResourceNamespace = o.LeaderElection.ResourceNamespace
	}

	o.ComponentConfig = *cfg
	return nil
	o.ComponentConfig = cfg
}

func splitHostIntPort(s string) (string, int, error) {
@ -189,8 +195,13 @@ func splitHostIntPort(s string) (string, int, error) {
	return host, portInt, err
}

// Flags returns flags for a specific scheduler by section name
func (o *Options) Flags() (nfs cliflag.NamedFlagSets) {
// initFlags initializes flags by section name.
func (o *Options) initFlags() {
	if o.Flags != nil {
		return
	}

	nfs := cliflag.NamedFlagSets{}
	fs := nfs.FlagSet("misc")
	fs.StringVar(&o.ConfigFile, "config", o.ConfigFile, `The path to the configuration file. The following flags can overwrite fields in this file:
  --policy-config-file
@ -203,20 +214,22 @@ func (o *Options) Flags() (nfs cliflag.NamedFlagSets) {
	o.CombinedInsecureServing.AddFlags(nfs.FlagSet("insecure serving"))
	o.Authentication.AddFlags(nfs.FlagSet("authentication"))
	o.Authorization.AddFlags(nfs.FlagSet("authorization"))
	o.Deprecated.AddFlags(nfs.FlagSet("deprecated"), &o.ComponentConfig)

	options.BindLeaderElectionFlags(&o.ComponentConfig.LeaderElection, nfs.FlagSet("leader election"))
	o.Deprecated.AddFlags(nfs.FlagSet("deprecated"))
	options.BindLeaderElectionFlags(o.LeaderElection, nfs.FlagSet("leader election"))
	utilfeature.DefaultMutableFeatureGate.AddFlag(nfs.FlagSet("feature gate"))
	o.Metrics.AddFlags(nfs.FlagSet("metrics"))
	o.Logs.AddFlags(nfs.FlagSet("logs"))

	return nfs
	o.Flags = &nfs
}

// ApplyTo applies the scheduler options to the given scheduler app configuration.
func (o *Options) ApplyTo(c *schedulerappconfig.Config) error {
	if len(o.ConfigFile) == 0 {
		c.ComponentConfig = o.ComponentConfig
		// If the --config arg is not specified, honor the deprecated as well as leader election CLI args.
		o.ApplyDeprecated()
		o.ApplyLeaderElectionTo(o.ComponentConfig)
		c.ComponentConfig = *o.ComponentConfig

		o.Deprecated.ApplyTo(c)

@ -228,6 +241,9 @@ func (o *Options) ApplyTo(c *schedulerappconfig.Config) error {
		if err != nil {
			return err
		}
		// If the --config arg is specified, honor the leader election CLI args only.
		o.ApplyLeaderElectionTo(cfg)

		if err := validation.ValidateKubeSchedulerConfiguration(cfg); err != nil {
			return err
		}
@ -269,7 +285,7 @@ func (o *Options) ApplyTo(c *schedulerappconfig.Config) error {
func (o *Options) Validate() []error {
	var errs []error

	if err := validation.ValidateKubeSchedulerConfiguration(&o.ComponentConfig); err != nil {
	if err := validation.ValidateKubeSchedulerConfiguration(o.ComponentConfig); err != nil {
		errs = append(errs, err.Errors()...)
	}
	errs = append(errs, o.SecureServing.Validate()...)
27 vendor/k8s.io/kubernetes/cmd/kube-scheduler/app/server.go generated vendored
@ -53,6 +53,7 @@ import (
	"k8s.io/kubernetes/cmd/kube-scheduler/app/options"
	"k8s.io/kubernetes/pkg/scheduler"
	kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
	"k8s.io/kubernetes/pkg/scheduler/apis/config/latest"
	"k8s.io/kubernetes/pkg/scheduler/framework/runtime"
	"k8s.io/kubernetes/pkg/scheduler/metrics/resources"
	"k8s.io/kubernetes/pkg/scheduler/profile"
@ -63,12 +64,8 @@ type Option func(runtime.Registry) error

// NewSchedulerCommand creates a *cobra.Command object with default parameters and registryOptions
func NewSchedulerCommand(registryOptions ...Option) *cobra.Command {
	opts, err := options.NewOptions()
	if err != nil {
		klog.Fatalf("unable to initialize command options: %v", err)
	}
	opts := options.NewOptions()

	namedFlagSets := opts.Flags()
	cmd := &cobra.Command{
		Use: "kube-scheduler",
		Long: `The Kubernetes scheduler is a control plane process which assigns
@ -80,10 +77,6 @@ kube-scheduler is the reference implementation.
See [scheduling](https://kubernetes.io/docs/concepts/scheduling-eviction/)
for more information about scheduling and the kube-scheduler component.`,
		Run: func(cmd *cobra.Command, args []string) {
			if err := opts.Complete(&namedFlagSets); err != nil {
				fmt.Fprintf(os.Stderr, "%v\n", err)
				os.Exit(1)
			}
			if err := runCommand(cmd, opts, registryOptions...); err != nil {
				fmt.Fprintf(os.Stderr, "%v\n", err)
				os.Exit(1)
@ -98,15 +91,17 @@ for more information about scheduling and the kube-scheduler component.`,
			return nil
		},
	}

	nfs := opts.Flags
	verflag.AddFlags(nfs.FlagSet("global"))
	globalflag.AddGlobalFlags(nfs.FlagSet("global"), cmd.Name())
	fs := cmd.Flags()
	verflag.AddFlags(namedFlagSets.FlagSet("global"))
	globalflag.AddGlobalFlags(namedFlagSets.FlagSet("global"), cmd.Name())
	for _, f := range namedFlagSets.FlagSets {
	for _, f := range nfs.FlagSets {
		fs.AddFlagSet(f)
	}

	cols, _, _ := term.TerminalSize(cmd.OutOrStdout())
	cliflag.SetUsageAndHelpFunc(cmd, namedFlagSets, cols)
	cliflag.SetUsageAndHelpFunc(cmd, *nfs, cols)

	cmd.MarkFlagFilename("config", "yaml", "yml", "json")

@ -308,6 +303,12 @@ func WithPlugin(name string, factory runtime.PluginFactory) Option {

// Setup creates a completed config and a scheduler based on the command args and options
func Setup(ctx context.Context, opts *options.Options, outOfTreeRegistryOptions ...Option) (*schedulerserverconfig.CompletedConfig, *scheduler.Scheduler, error) {
	if cfg, err := latest.Default(); err != nil {
		return nil, nil, err
	} else {
		opts.ComponentConfig = cfg
	}

	if errs := opts.Validate(); len(errs) > 0 {
		return nil, nil, utilerrors.NewAggregate(errs)
	}
@ -304,12 +304,11 @@ func (c *Controller) syncEndpoints(key string) error {
		return err
	}

	// This means that if a Service transitions away from a nil selector, any
	// mirrored EndpointSlices will not be cleaned up. #91072 tracks this issue
	// for this controller along with the Endpoints and EndpointSlice
	// controllers.
	// If a selector is specified, clean up any mirrored slices.
	if svc.Spec.Selector != nil {
		return nil
		klog.V(4).Infof("%s/%s Service now has selector, cleaning up any mirrored EndpointSlices", namespace, name)
		c.endpointSliceTracker.DeleteService(namespace, name)
		return c.deleteMirroredSlices(namespace, name)
	}

	endpointSlices, err := endpointSlicesMirroredForService(c.endpointSliceLister, namespace, name)
@ -372,7 +371,7 @@ func (c *Controller) onServiceUpdate(prevObj, obj interface{}) {
	service := obj.(*v1.Service)
	prevService := prevObj.(*v1.Service)
	if service == nil || prevService == nil {
		utilruntime.HandleError(fmt.Errorf("onServiceUpdate() expected type v1.Endpoints, got %T, %T", prevObj, obj))
		utilruntime.HandleError(fmt.Errorf("onServiceUpdate() expected type v1.Service, got %T, %T", prevObj, obj))
		return
	}
	if (service.Spec.Selector == nil) != (prevService.Spec.Selector == nil) {
@ -28,7 +28,7 @@ import (
	"k8s.io/klog/v2"

	"k8s.io/api/core/v1"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/volume"
	"k8s.io/kubernetes/pkg/volume/util"
25 vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler/reconciler.go generated vendored
@ -151,13 +151,13 @@ func (rc *reconciler) reconcile() {
		// The operation key format is different depending on whether the volume
		// allows multi attach across different nodes.
		if util.IsMultiAttachAllowed(attachedVolume.VolumeSpec) {
			if rc.attacherDetacher.IsOperationPending(attachedVolume.VolumeName, "" /* podName */, attachedVolume.NodeName) {
				klog.V(10).Infof("Operation for volume %q is already running for node %q. Can't start detach", attachedVolume.VolumeName, attachedVolume.NodeName)
			if !rc.attacherDetacher.IsOperationSafeToRetry(attachedVolume.VolumeName, "" /* podName */, attachedVolume.NodeName, operationexecutor.DetachOperationName) {
				klog.V(10).Infof("Operation for volume %q is already running or still in exponential backoff for node %q. Can't start detach", attachedVolume.VolumeName, attachedVolume.NodeName)
				continue
			}
		} else {
			if rc.attacherDetacher.IsOperationPending(attachedVolume.VolumeName, "" /* podName */, "" /* nodeName */) {
				klog.V(10).Infof("Operation for volume %q is already running in the cluster. Can't start detach for %q", attachedVolume.VolumeName, attachedVolume.NodeName)
			if !rc.attacherDetacher.IsOperationSafeToRetry(attachedVolume.VolumeName, "" /* podName */, "" /* nodeName */, operationexecutor.DetachOperationName) {
				klog.V(10).Infof("Operation for volume %q is already running or still in exponential backoff in the cluster. Can't start detach for %q", attachedVolume.VolumeName, attachedVolume.NodeName)
				continue
			}
		}
@ -193,6 +193,8 @@ func (rc *reconciler) reconcile() {

		// Before triggering volume detach, mark volume as detached and update the node status
		// If it fails to update node status, skip detach volume
		// If volume detach operation fails, the volume needs to be added back to report as attached so that node status
		// has the correct volume attachment information.
		err = rc.actualStateOfWorld.RemoveVolumeFromReportAsAttached(attachedVolume.VolumeName, attachedVolume.NodeName)
		if err != nil {
			klog.V(5).Infof("RemoveVolumeFromReportAsAttached failed while removing volume %q from node %q with: %v",
@ -222,10 +224,17 @@ func (rc *reconciler) reconcile() {
				klog.Warningf(attachedVolume.GenerateMsgDetailed("attacherDetacher.DetachVolume started", fmt.Sprintf("This volume is not safe to detach, but maxWaitForUnmountDuration %v expired, force detaching", rc.maxWaitForUnmountDuration)))
			}
		}
		if err != nil && !exponentialbackoff.IsExponentialBackoff(err) {
			// Ignore exponentialbackoff.IsExponentialBackoff errors, they are expected.
			// Log all other errors.
			klog.Errorf(attachedVolume.GenerateErrorDetailed("attacherDetacher.DetachVolume failed to start", err).Error())
		if err != nil {
			// Add volume back to ReportAsAttached if DetachVolume call failed so that node status updater will add it back to VolumeAttached list.
			// This function is also called during executing the volume detach operation in operation_generoator.
			// It is needed here too because DetachVolume call might fail before executing the actual operation in operation_executor (e.g., cannot find volume plugin etc.)
			rc.actualStateOfWorld.AddVolumeToReportAsAttached(attachedVolume.VolumeName, attachedVolume.NodeName)

			if !exponentialbackoff.IsExponentialBackoff(err) {
				// Ignore exponentialbackoff.IsExponentialBackoff errors, they are expected.
				// Log all other errors.
				klog.Errorf(attachedVolume.GenerateErrorDetailed("attacherDetacher.DetachVolume failed to start", err).Error())
			}
		}
	}
}
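The pattern in the last hunk is an optimistic removal with rollback: the volume is dropped from the node's reported-as-attached list before detaching, and re-added whenever the detach call fails, so node status never advertises a volume that is still attached. A hedged, self-contained sketch of that shape (hypothetical names, not the vendored types):

```go
package main

import (
	"errors"
	"fmt"
)

var errBackoff = errors.New("exponential backoff")

type state struct{ reportedAttached map[string]bool }

// detachWithRollback mirrors the patched reconcile step: remove the volume
// from the reported-as-attached list up front, and restore it when the
// detach call fails for any reason.
func detachWithRollback(s *state, volume string, detach func() error) {
	s.reportedAttached[volume] = false
	if err := detach(); err != nil {
		s.reportedAttached[volume] = true // roll back the optimistic removal
		if !errors.Is(err, errBackoff) {
			fmt.Println("detach failed:", err) // backoff errors are expected noise
		}
	}
}

func main() {
	s := &state{reportedAttached: map[string]bool{"vol-1": true}}
	detachWithRollback(s, "vol-1", func() error { return errBackoff })
	fmt.Println(s.reportedAttached["vol-1"]) // true: still reported attached
}
```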
@ -350,7 +350,7 @@ func (c *Controller) pvcAddedUpdated(obj interface{}) {
		utilruntime.HandleError(fmt.Errorf("couldn't get key for Persistent Volume Claim %#v: %v", pvc, err))
		return
	}
	klog.V(4).InfoS("Got event on PVC", key)
	klog.V(4).InfoS("Got event on PVC", "pvc", klog.KObj(pvc))

	if protectionutil.NeedToAddFinalizer(pvc, volumeutil.PVCProtectionFinalizer) || protectionutil.IsDeletionCandidate(pvc, volumeutil.PVCProtectionFinalizer) {
		c.queue.Add(key)
1 vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs/logs.go generated vendored
@ -361,6 +361,7 @@ func ReadLogs(ctx context.Context, path, containerID string, opts *LogOptions, r
		}
		return fmt.Errorf("failed to open log file %q: %v", path, err)
	}
	defer newF.Close()
	f.Close()
	if err := watcher.Remove(f.Name()); err != nil && !os.IsNotExist(err) {
		klog.ErrorS(err, "Failed to remove file watch", "path", f.Name())
33 vendor/k8s.io/kubernetes/pkg/proxy/endpoints.go generated vendored
@ -121,7 +121,9 @@ func (info *BaseEndpointInfo) Port() (int, error) {
// Equal is part of proxy.Endpoint interface.
func (info *BaseEndpointInfo) Equal(other Endpoint) bool {
	return info.String() == other.String() && info.GetIsLocal() == other.GetIsLocal()
	return info.String() == other.String() &&
		info.GetIsLocal() == other.GetIsLocal() &&
		info.IsReady() == other.IsReady()
}

// GetNodeName returns the NodeName for this endpoint.
@ -536,13 +538,22 @@ func (em EndpointsMap) getLocalReadyEndpointIPs() map[types.NamespacedName]sets.
// detectStaleConnections modifies <staleEndpoints> and <staleServices> with detected stale connections. <staleServiceNames>
// is used to store stale udp service in order to clear udp conntrack later.
func detectStaleConnections(oldEndpointsMap, newEndpointsMap EndpointsMap, staleEndpoints *[]ServiceEndpoint, staleServiceNames *[]ServicePortName) {
	// Detect stale endpoints: an endpoint can have stale conntrack entries if it was receiving traffic
	// and then goes unready or changes its IP address.
	for svcPortName, epList := range oldEndpointsMap {
		if svcPortName.Protocol != v1.ProtocolUDP {
			continue
		}

		for _, ep := range epList {
			// if the old endpoint wasn't ready is not possible to have stale entries
			// since there was no traffic sent to it.
			if !ep.IsReady() {
				continue
			}
			stale := true
			// Check if the endpoint has changed, including if it went from ready to not ready.
			// If it did change stale entries for the old endpoint has to be cleared.
			for i := range newEndpointsMap[svcPortName] {
				if newEndpointsMap[svcPortName][i].Equal(ep) {
					stale = false
@ -556,13 +567,29 @@ func detectStaleConnections(oldEndpointsMap, newEndpointsMap EndpointsMap, stale
		}
	}

	// Detect stale services
	// For udp service, if its backend changes from 0 to non-0 ready endpoints.
	// There may exist a conntrack entry that could blackhole traffic to the service.
	for svcPortName, epList := range newEndpointsMap {
		if svcPortName.Protocol != v1.ProtocolUDP {
			continue
		}

		// For udp service, if its backend changes from 0 to non-0. There may exist a conntrack entry that could blackhole traffic to the service.
		if len(epList) > 0 && len(oldEndpointsMap[svcPortName]) == 0 {
			epReady := 0
			for _, ep := range epList {
				if ep.IsReady() {
					epReady++
				}
			}

			oldEpReady := 0
			for _, ep := range oldEndpointsMap[svcPortName] {
				if ep.IsReady() {
					oldEpReady++
				}
			}

			if epReady > 0 && oldEpReady == 0 {
				*staleServiceNames = append(*staleServiceNames, svcPortName)
			}
		}
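The key behavioral change: stale-service detection now counts *ready* endpoints on both sides of the update, instead of only checking whether the endpoint list went from empty to non-empty. A hedged reduction of that check (hypothetical types, not the vendored proxy package):

```go
package main

import "fmt"

type endpoint struct{ ready bool }

// staleUDPService mirrors the patched check: a UDP service becomes a
// conntrack-cleanup candidate only when it transitions from zero ready
// endpoints to at least one ready endpoint.
func staleUDPService(old, curr []endpoint) bool {
	countReady := func(eps []endpoint) int {
		n := 0
		for _, ep := range eps {
			if ep.ready {
				n++
			}
		}
		return n
	}
	return countReady(curr) > 0 && countReady(old) == 0
}

func main() {
	fmt.Println(staleUDPService(nil, []endpoint{{ready: true}}))                          // true
	fmt.Println(staleUDPService([]endpoint{{ready: false}}, []endpoint{{ready: true}}))   // true: old endpoint was never ready
	fmt.Println(staleUDPService([]endpoint{{ready: true}}, []endpoint{{ready: true}}))    // false
}
```

This matters because an unready endpoint never received traffic, so a service whose only old endpoints were unready can still hold a blackholing conntrack entry.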
186 vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go generated vendored
@ -164,7 +164,8 @@ func (e *endpointsInfo) Equal(other proxy.Endpoint) bool {
	return e.Endpoint == o.Endpoint &&
		e.IsLocal == o.IsLocal &&
		e.protocol == o.protocol &&
		e.chainName == o.chainName
		e.chainName == o.chainName &&
		e.Ready == o.Ready
}

// Returns the endpoint chain name for a given endpointsInfo.
@ -974,12 +975,8 @@ func (proxier *Proxier) syncProxyRules() {
	// you should always do one of the below:
	// slice = slice[:0] // and then append to it
	// slice = append(slice[:0], ...)
	endpoints := make([]*endpointsInfo, 0)
	endpointChains := make([]utiliptables.Chain, 0)
	readyEndpoints := make([]*endpointsInfo, 0)
	readyEndpointChains := make([]utiliptables.Chain, 0)
	localReadyEndpointChains := make([]utiliptables.Chain, 0)
	localServingTerminatingEndpointChains := make([]utiliptables.Chain, 0)
	localEndpointChains := make([]utiliptables.Chain, 0)

	// To avoid growing this slice, we arbitrarily set its size to 64,
	// there is never more than that many arguments for a single line.
@ -1021,7 +1018,82 @@ func (proxier *Proxier) syncProxyRules() {
		// Service does not have conflicting configuration such as
		// externalTrafficPolicy=Local.
		allEndpoints = proxy.FilterEndpoints(allEndpoints, svcInfo, proxier.nodeLabels)
		hasEndpoints := len(allEndpoints) > 0

		// Scan the endpoints list to see what we have. "hasEndpoints" will be true
		// if there are any usable endpoints for this service anywhere in the cluster.
		var hasEndpoints, hasLocalReadyEndpoints, hasLocalServingTerminatingEndpoints bool
		for _, ep := range allEndpoints {
			if ep.IsReady() {
				hasEndpoints = true
				if ep.GetIsLocal() {
					hasLocalReadyEndpoints = true
				}
			} else if svc.NodeLocalExternal() && utilfeature.DefaultFeatureGate.Enabled(features.ProxyTerminatingEndpoints) {
				if ep.IsServing() && ep.IsTerminating() {
					hasEndpoints = true
					if ep.GetIsLocal() {
						hasLocalServingTerminatingEndpoints = true
					}
				}
			}
		}
		useTerminatingEndpoints := !hasLocalReadyEndpoints && hasLocalServingTerminatingEndpoints

		// Generate the per-endpoint chains.
		readyEndpointChains = readyEndpointChains[:0]
		localEndpointChains = localEndpointChains[:0]
		for _, ep := range allEndpoints {
			epInfo, ok := ep.(*endpointsInfo)
			if !ok {
				klog.ErrorS(err, "Failed to cast endpointsInfo", "endpointsInfo", ep)
				continue
			}

			endpointChain := epInfo.endpointChain(svcNameString, protocol)
			endpointInUse := false

			if epInfo.Ready {
				readyEndpointChains = append(readyEndpointChains, endpointChain)
				endpointInUse = true
			}
			if svc.NodeLocalExternal() && epInfo.IsLocal {
				if useTerminatingEndpoints {
					if epInfo.Serving && epInfo.Terminating {
						localEndpointChains = append(localEndpointChains, endpointChain)
						endpointInUse = true
					}
				} else if epInfo.Ready {
					localEndpointChains = append(localEndpointChains, endpointChain)
					endpointInUse = true
				}
			}

			if !endpointInUse {
				continue
			}

			// Create the endpoint chain, retaining counters if possible.
			if chain, ok := existingNATChains[endpointChain]; ok {
				utilproxy.WriteBytesLine(proxier.natChains, chain)
			} else {
				utilproxy.WriteLine(proxier.natChains, utiliptables.MakeChainLine(endpointChain))
			}
			activeNATChains[endpointChain] = true

			args = append(args[:0], "-A", string(endpointChain))
			args = proxier.appendServiceCommentLocked(args, svcNameString)
			// Handle traffic that loops back to the originator with SNAT.
			utilproxy.WriteLine(proxier.natRules, append(args,
				"-s", utilproxy.ToCIDR(net.ParseIP(epInfo.IP())),
				"-j", string(KubeMarkMasqChain))...)
			// Update client-affinity lists.
			if svcInfo.SessionAffinityType() == v1.ServiceAffinityClientIP {
				args = append(args, "-m", "recent", "--name", string(endpointChain), "--set")
			}
			// DNAT to final destination.
			args = append(args, "-m", protocol, "-p", protocol, "-j", "DNAT", "--to-destination", epInfo.Endpoint)
			utilproxy.WriteLine(proxier.natRules, args...)
		}

		svcChain := svcInfo.servicePortChainName
		if hasEndpoints {
@ -1338,35 +1410,9 @@ func (proxier *Proxier) syncProxyRules() {
			continue
		}

		// Generate the per-endpoint chains. We do this in multiple passes so we
		// can group rules together.
		// These two slices parallel each other - keep in sync
		endpoints = endpoints[:0]
		endpointChains = endpointChains[:0]
		var endpointChain utiliptables.Chain
		for _, ep := range allEndpoints {
			epInfo, ok := ep.(*endpointsInfo)
			if !ok {
				klog.ErrorS(err, "Failed to cast endpointsInfo", "endpointsInfo", ep.String())
				continue
			}

			endpoints = append(endpoints, epInfo)
			endpointChain = epInfo.endpointChain(svcNameString, protocol)
			endpointChains = append(endpointChains, endpointChain)

			// Create the endpoint chain, retaining counters if possible.
			if chain, ok := existingNATChains[endpointChain]; ok {
				utilproxy.WriteBytesLine(proxier.natChains, chain)
			} else {
				utilproxy.WriteLine(proxier.natChains, utiliptables.MakeChainLine(endpointChain))
			}
			activeNATChains[endpointChain] = true
		}

		// First write session affinity rules, if applicable.
		if svcInfo.SessionAffinityType() == v1.ServiceAffinityClientIP {
			for _, endpointChain := range endpointChains {
			for _, endpointChain := range readyEndpointChains {
				args = append(args[:0],
					"-A", string(svcChain),
				)
@ -1380,38 +1426,9 @@ func (proxier *Proxier) syncProxyRules() {
			}
		}

		// Firstly, categorize each endpoint into three buckets:
		//   1. all endpoints that are ready and NOT terminating.
		//   2. all endpoints that are local, ready and NOT terminating, and externalTrafficPolicy=Local
		//   3. all endpoints that are local, serving and terminating, and externalTrafficPolicy=Local
		readyEndpointChains = readyEndpointChains[:0]
		readyEndpoints := readyEndpoints[:0]
		localReadyEndpointChains := localReadyEndpointChains[:0]
		localServingTerminatingEndpointChains := localServingTerminatingEndpointChains[:0]
		for i, endpointChain := range endpointChains {
			if endpoints[i].Ready {
				readyEndpointChains = append(readyEndpointChains, endpointChain)
				readyEndpoints = append(readyEndpoints, endpoints[i])
			}

			if svc.NodeLocalExternal() && endpoints[i].IsLocal {
				if endpoints[i].Ready {
					localReadyEndpointChains = append(localReadyEndpointChains, endpointChain)
				} else if endpoints[i].Serving && endpoints[i].Terminating {
					localServingTerminatingEndpointChains = append(localServingTerminatingEndpointChains, endpointChain)
				}
			}
		}

		// Now write loadbalancing & DNAT rules.
		// Now write loadbalancing rules
		numReadyEndpoints := len(readyEndpointChains)
		for i, endpointChain := range readyEndpointChains {
			epIP := readyEndpoints[i].IP()
			if epIP == "" {
				// Error parsing this endpoint has been logged. Skip to next endpoint.
				continue
			}

			// Balancing rules in the per-service chain.
			args = append(args[:0], "-A", string(svcChain))
			args = proxier.appendServiceCommentLocked(args, svcNameString)
@ -1427,31 +1444,6 @@ func (proxier *Proxier) syncProxyRules() {
			utilproxy.WriteLine(proxier.natRules, args...)
		}

		// Every endpoint gets a chain, regardless of its state. This is required later since we may
		// want to jump to endpoint chains that are terminating.
		for i, endpointChain := range endpointChains {
			epIP := endpoints[i].IP()
			if epIP == "" {
				// Error parsing this endpoint has been logged. Skip to next endpoint.
				continue
			}

			// Rules in the per-endpoint chain.
			args = append(args[:0], "-A", string(endpointChain))
			args = proxier.appendServiceCommentLocked(args, svcNameString)
			// Handle traffic that loops back to the originator with SNAT.
			utilproxy.WriteLine(proxier.natRules, append(args,
				"-s", utilproxy.ToCIDR(net.ParseIP(epIP)),
				"-j", string(KubeMarkMasqChain))...)
			// Update client-affinity lists.
			if svcInfo.SessionAffinityType() == v1.ServiceAffinityClientIP {
				args = append(args, "-m", "recent", "--name", string(endpointChain), "--set")
			}
			// DNAT to final destination.
			args = append(args, "-m", protocol, "-p", protocol, "-j", "DNAT", "--to-destination", endpoints[i].Endpoint)
			utilproxy.WriteLine(proxier.natRules, args...)
		}

		// The logic below this applies only if this service is marked as OnlyLocal
		if !svcInfo.NodeLocalExternal() {
			continue
@ -1480,12 +1472,6 @@ func (proxier *Proxier) syncProxyRules() {
			"-m", "comment", "--comment", fmt.Sprintf(`"route LOCAL traffic for %s LB IP to service chain"`, svcNameString),
			"-m", "addrtype", "--src-type", "LOCAL", "-j", string(svcChain))...)

		// Prefer local ready endpoint chains, but fall back to ready terminating if none exist
		localEndpointChains := localReadyEndpointChains
		if utilfeature.DefaultFeatureGate.Enabled(features.ProxyTerminatingEndpoints) && len(localEndpointChains) == 0 {
			localEndpointChains = localServingTerminatingEndpointChains
		}

		numLocalEndpoints := len(localEndpointChains)
		if numLocalEndpoints == 0 {
			// Blackhole all traffic since there are no local endpoints
@ -1615,6 +1601,11 @@ func (proxier *Proxier) syncProxyRules() {
		"-j", "ACCEPT",
	)

	numberFilterIptablesRules := utilproxy.CountBytesLines(proxier.filterRules.Bytes())
	metrics.IptablesRulesTotal.WithLabelValues(string(utiliptables.TableFilter)).Set(float64(numberFilterIptablesRules))
	numberNatIptablesRules := utilproxy.CountBytesLines(proxier.natRules.Bytes())
	metrics.IptablesRulesTotal.WithLabelValues(string(utiliptables.TableNAT)).Set(float64(numberNatIptablesRules))

	// Write the end-of-table markers.
	utilproxy.WriteLine(proxier.filterRules, "COMMIT")
	utilproxy.WriteLine(proxier.natRules, "COMMIT")
@ -1627,11 +1618,6 @@ func (proxier *Proxier) syncProxyRules() {
	proxier.iptablesData.Write(proxier.natChains.Bytes())
	proxier.iptablesData.Write(proxier.natRules.Bytes())

	numberFilterIptablesRules := utilproxy.CountBytesLines(proxier.filterRules.Bytes())
	metrics.IptablesRulesTotal.WithLabelValues(string(utiliptables.TableFilter)).Set(float64(numberFilterIptablesRules))
	numberNatIptablesRules := utilproxy.CountBytesLines(proxier.natRules.Bytes())
	metrics.IptablesRulesTotal.WithLabelValues(string(utiliptables.TableNAT)).Set(float64(numberNatIptablesRules))

	klog.V(5).InfoS("Restoring iptables", "rules", proxier.iptablesData.Bytes())
	err = proxier.iptables.RestoreAll(proxier.iptablesData.Bytes(), utiliptables.NoFlushTables, utiliptables.RestoreCounters)
	if err != nil {
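The endpoint-selection rule this refactor implements: for `externalTrafficPolicy=Local` services, prefer local ready endpoints, and fall back to local serving-and-terminating endpoints only when no local endpoint is ready. A hedged reduction of that selection (hypothetical types, not the vendored proxier):

```go
package main

import "fmt"

type ep struct{ ready, serving, terminating, local bool }

// localChains mirrors the patched selection: local ready endpoints win;
// local serving+terminating endpoints are used only as a fallback
// (the ProxyTerminatingEndpoints behavior).
func localChains(eps []ep) []ep {
	var ready, servingTerminating []ep
	for _, e := range eps {
		if !e.local {
			continue
		}
		if e.ready {
			ready = append(ready, e)
		} else if e.serving && e.terminating {
			servingTerminating = append(servingTerminating, e)
		}
	}
	if len(ready) > 0 {
		return ready
	}
	return servingTerminating
}

func main() {
	eps := []ep{{ready: false, serving: true, terminating: true, local: true}}
	fmt.Println(len(localChains(eps))) // 1: falls back to terminating endpoints

	eps = append(eps, ep{ready: true, local: true})
	fmt.Println(len(localChains(eps))) // 1: the ready endpoint wins
}
```

Adding `Ready` to the two `Equal` implementations above is what makes the proxier resync when an endpoint merely flips readiness, which the old comparison missed.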
@ -89,6 +89,7 @@ func NewBalancedAllocation(baArgs runtime.Object, h framework.Handle, fts featur
		resourceAllocationScorer: resourceAllocationScorer{
			Name:                BalancedAllocationName,
			scorer:              balancedResourceScorer,
			useRequested:        true,
			resourceToWeightMap: resToWeightMap,
			enablePodOverhead:   fts.EnablePodOverhead,
		},
@ -32,7 +32,10 @@ type scorer func(args *config.NodeResourcesFitArgs) *resourceAllocationScorer

// resourceAllocationScorer contains information to calculate resource allocation score.
type resourceAllocationScorer struct {
	Name string
	Name string
	// used to decide whether to use Requested or NonZeroRequested for
	// cpu and memory.
	useRequested bool
	scorer func(requested, allocable resourceToValueMap) int64
	resourceToWeightMap resourceToWeightMap

@ -53,10 +56,11 @@ func (r *resourceAllocationScorer) score(
	if r.resourceToWeightMap == nil {
		return 0, framework.NewStatus(framework.Error, "resources not found")
	}

	requested := make(resourceToValueMap)
	allocatable := make(resourceToValueMap)
	for resource := range r.resourceToWeightMap {
		alloc, req := calculateResourceAllocatableRequest(nodeInfo, pod, resource, r.enablePodOverhead)
		alloc, req := r.calculateResourceAllocatableRequest(nodeInfo, pod, resource)
		if alloc != 0 {
			// Only fill the extended resource entry when it's non-zero.
			allocatable[resource], requested[resource] = alloc, req
@ -80,8 +84,13 @@ func (r *resourceAllocationScorer) score(
// - 1st param: quantity of allocatable resource on the node.
// - 2nd param: aggregated quantity of requested resource on the node.
// Note: if it's an extended resource, and the pod doesn't request it, (0, 0) is returned.
func calculateResourceAllocatableRequest(nodeInfo *framework.NodeInfo, pod *v1.Pod, resource v1.ResourceName, enablePodOverhead bool) (int64, int64) {
	podRequest := calculatePodResourceRequest(pod, resource, enablePodOverhead)
func (r *resourceAllocationScorer) calculateResourceAllocatableRequest(nodeInfo *framework.NodeInfo, pod *v1.Pod, resource v1.ResourceName) (int64, int64) {
	requested := nodeInfo.NonZeroRequested
	if r.useRequested {
		requested = nodeInfo.Requested
	}

	podRequest := r.calculatePodResourceRequest(pod, resource)
	// If it's an extended resource, and the pod doesn't request it. We return (0, 0)
	// as an implication to bypass scoring on this resource.
	if podRequest == 0 && schedutil.IsScalarResourceName(resource) {
@ -89,9 +98,9 @@ func calculateResourceAllocatableRequest(nodeInfo *framework.NodeInfo, pod *v1.P
	}
	switch resource {
	case v1.ResourceCPU:
		return nodeInfo.Allocatable.MilliCPU, (nodeInfo.NonZeroRequested.MilliCPU + podRequest)
		return nodeInfo.Allocatable.MilliCPU, (requested.MilliCPU + podRequest)
	case v1.ResourceMemory:
		return nodeInfo.Allocatable.Memory, (nodeInfo.NonZeroRequested.Memory + podRequest)
		return nodeInfo.Allocatable.Memory, (requested.Memory + podRequest)
	case v1.ResourceEphemeralStorage:
		return nodeInfo.Allocatable.EphemeralStorage, (nodeInfo.Requested.EphemeralStorage + podRequest)
	default:
@ -108,24 +117,24 @@ func calculateResourceAllocatableRequest(nodeInfo *framework.NodeInfo, pod *v1.P
// calculatePodResourceRequest returns the total non-zero requests. If Overhead is defined for the pod and the
// PodOverhead feature is enabled, the Overhead is added to the result.
// podResourceRequest = max(sum(podSpec.Containers), podSpec.InitContainers) + overHead
func calculatePodResourceRequest(pod *v1.Pod, resource v1.ResourceName, enablePodOverhead bool) int64 {
func (r *resourceAllocationScorer) calculatePodResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 {
	var podRequest int64
	for i := range pod.Spec.Containers {
		container := &pod.Spec.Containers[i]
		value := schedutil.GetNonzeroRequestForResource(resource, &container.Resources.Requests)
		value := schedutil.GetRequestForResource(resource, &container.Resources.Requests, !r.useRequested)
		podRequest += value
	}

	for i := range pod.Spec.InitContainers {
		initContainer := &pod.Spec.InitContainers[i]
		value := schedutil.GetNonzeroRequestForResource(resource, &initContainer.Resources.Requests)
		value := schedutil.GetRequestForResource(resource, &initContainer.Resources.Requests, !r.useRequested)
		if podRequest < value {
			podRequest = value
		}
	}

	// If Overhead is being utilized, add to the total requests for the pod
	if pod.Spec.Overhead != nil && enablePodOverhead {
	if pod.Spec.Overhead != nil && r.enablePodOverhead {
		if quantity, found := pod.Spec.Overhead[resource]; found {
			podRequest += quantity.Value()
		}
@ -40,26 +40,27 @@ const (
// GetNonzeroRequests returns the default cpu and memory resource request if none is found or
// what is provided on the request.
func GetNonzeroRequests(requests *v1.ResourceList) (int64, int64) {
	return GetNonzeroRequestForResource(v1.ResourceCPU, requests),
		GetNonzeroRequestForResource(v1.ResourceMemory, requests)
	return GetRequestForResource(v1.ResourceCPU, requests, true),
		GetRequestForResource(v1.ResourceMemory, requests, true)
}

// GetNonzeroRequestForResource returns the default resource request if none is found or
// what is provided on the request.
func GetNonzeroRequestForResource(resource v1.ResourceName, requests *v1.ResourceList) int64 {
// GetRequestForResource returns the requested values unless nonZero is true and there is no defined request
// for CPU and memory.
// If nonZero is true and the resource has no defined request for CPU or memory, it returns a default value.
func GetRequestForResource(resource v1.ResourceName, requests *v1.ResourceList, nonZero bool) int64 {
	if requests == nil {
		return 0
	}
	switch resource {
	case v1.ResourceCPU:
		// Override if un-set, but not if explicitly set to zero
		if _, found := (*requests)[v1.ResourceCPU]; !found {
		if _, found := (*requests)[v1.ResourceCPU]; !found && nonZero {
			return DefaultMilliCPURequest
		}
		return requests.Cpu().MilliValue()
	case v1.ResourceMemory:
		// Override if un-set, but not if explicitly set to zero
		if _, found := (*requests)[v1.ResourceMemory]; !found {
		if _, found := (*requests)[v1.ResourceMemory]; !found && nonZero {
			return DefaultMemoryRequest
		}
		return requests.Memory().Value()
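The `nonZero` parameter is the pivot of this whole scheduler change: with `nonZero=true` an unset CPU/memory request falls back to a default (the old `GetNonzeroRequestForResource` behavior), while `nonZero=false` (what `useRequested: true` scorers pass, via `!r.useRequested`) returns the raw request, including zero. A hedged standalone reduction (hypothetical names and a placeholder default value):

```go
package main

import "fmt"

// defaultMilliCPURequest stands in for the scheduler's default; the real
// constant lives in the vendored util package.
const defaultMilliCPURequest int64 = 100

// getCPURequest mirrors GetRequestForResource for CPU: with nonZero=true an
// unset request falls back to the default, while an explicit value (even 0)
// is returned as-is.
func getCPURequest(requests map[string]int64, nonZero bool) int64 {
	v, found := requests["cpu"]
	if !found && nonZero {
		return defaultMilliCPURequest
	}
	return v
}

func main() {
	fmt.Println(getCPURequest(map[string]int64{}, true))            // 100: default applied
	fmt.Println(getCPURequest(map[string]int64{}, false))           // 0: raw-request path
	fmt.Println(getCPURequest(map[string]int64{"cpu": 250}, true))  // 250
}
```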
3 vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go generated vendored
@ -276,7 +276,8 @@ func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error
	c.supportsSELinux, err = c.kubeVolHost.GetHostUtil().GetSELinuxSupport(dir)
	if err != nil {
		klog.V(2).Info(log("error checking for SELinux support: %s", err))
		// The volume is mounted. Return UncertainProgressError, so kubelet will unmount it when user deletes the pod.
		return volumetypes.NewUncertainProgressError(fmt.Sprintf("error checking for SELinux support: %s", err))
	}

	if !driverSupportsCSIVolumeMountGroup && c.supportsFSGroup(fsType, mounterArgs.FsGroup, c.fsGroupPolicy) {
2 vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/fake_hostutil.go generated vendored
@ -108,7 +108,7 @@ func (hu *FakeHostUtil) GetOwner(pathname string) (int64, int64, error) {
// GetSELinuxSupport tests if pathname is on a mount that supports SELinux.
// Not implemented for testing
func (hu *FakeHostUtil) GetSELinuxSupport(pathname string) (bool, error) {
	return false, errors.New("GetSELinuxSupport not implemented")
	return false, nil
}

// GetMode returns permissions of pathname.
13 vendor/k8s.io/kubernetes/pkg/volume/util/hostutil/hostutil_linux.go generated vendored
@ -28,6 +28,7 @@ import (
	"golang.org/x/sys/unix"
	"k8s.io/klog/v2"
	"k8s.io/kubernetes/pkg/util/selinux"
	"k8s.io/mount-utils"
	utilpath "k8s.io/utils/path"
)
@ -229,8 +230,16 @@ func DoMakeRShared(path string, mountInfoFilename string) error {
	return nil
}

// selinux.SELinuxEnabled implementation for unit tests
type seLinuxEnabledFunc func() bool

// GetSELinux is common implementation of GetSELinuxSupport on Linux.
func GetSELinux(path string, mountInfoFilename string) (bool, error) {
func GetSELinux(path string, mountInfoFilename string, selinuxEnabled seLinuxEnabledFunc) (bool, error) {
	// Skip /proc/mounts parsing if SELinux is disabled.
	if !selinuxEnabled() {
		return false, nil
	}

	info, err := findMountInfo(path, mountInfoFilename)
	if err != nil {
		return false, err
@ -253,7 +262,7 @@ func GetSELinux(path string, mountInfoFilename string) (bool, error) {
// GetSELinuxSupport returns true if given path is on a mount that supports
// SELinux.
func (hu *HostUtil) GetSELinuxSupport(pathname string) (bool, error) {
	return GetSELinux(pathname, procMountInfoPath)
	return GetSELinux(pathname, procMountInfoPath, selinux.SELinuxEnabled)
}

// GetOwner returns the integer ID for the user and group of the given path
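The `seLinuxEnabledFunc` parameter is a small dependency-injection move: production code passes the real probe, unit tests pass a stub, and the fast path skips `/proc/mounts` parsing entirely when SELinux is off. A hedged sketch of the shape (hypothetical function, not the vendored hostutil):

```go
package main

import "fmt"

// seLinuxEnabledFunc lets unit tests stub out the real SELinux probe,
// mirroring the injection added to GetSELinux above.
type seLinuxEnabledFunc func() bool

func getSELinux(path string, enabled seLinuxEnabledFunc) (bool, error) {
	if !enabled() {
		return false, nil // skip expensive mount-info parsing
	}
	// ... real /proc/mounts inspection would go here ...
	return true, nil
}

func main() {
	always := func() bool { return true }
	never := func() bool { return false }
	fmt.Println(getSELinux("/var/lib/kubelet", never))  // false <nil>
	fmt.Println(getSELinux("/var/lib/kubelet", always)) // true <nil>
}
```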
vendor/k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations/nestedpendingoperations.go generated vendored
@@ -28,7 +28,7 @@ import (
 	"fmt"
 	"sync"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	k8sRuntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/klog/v2"
@@ -106,6 +106,13 @@ type NestedPendingOperations interface {
 		volumeName v1.UniqueVolumeName,
 		podName volumetypes.UniquePodName,
 		nodeName types.NodeName) bool
+
+	// IsOperationSafeToRetry returns false if an operation for the given volumeName
+	// and one of podName or nodeName is pending or in exponential backoff, otherwise it returns true
+	IsOperationSafeToRetry(
+		volumeName v1.UniqueVolumeName,
+		podName volumetypes.UniquePodName,
+		nodeName types.NodeName, operationName string) bool
 }
 
 // NewNestedPendingOperations returns a new instance of NestedPendingOperations.
@@ -185,6 +192,33 @@ func (grm *nestedPendingOperations) Run(
 
 	return nil
 }
+
+func (grm *nestedPendingOperations) IsOperationSafeToRetry(
+	volumeName v1.UniqueVolumeName,
+	podName volumetypes.UniquePodName,
+	nodeName types.NodeName,
+	operationName string) bool {
+	grm.lock.RLock()
+	defer grm.lock.RUnlock()
+
+	opKey := operationKey{volumeName, podName, nodeName}
+	exist, previousOpIndex := grm.isOperationExists(opKey)
+	if !exist {
+		return true
+	}
+	previousOp := grm.operations[previousOpIndex]
+	if previousOp.operationPending {
+		return false
+	}
+
+	backOffErr := previousOp.expBackoff.SafeToRetry(fmt.Sprintf("%+v", opKey))
+	if backOffErr != nil {
+		if previousOp.operationName == operationName {
+			return false
+		}
+	}
+
+	return true
+}
 
 func (grm *nestedPendingOperations) IsOperationPending(
 	volumeName v1.UniqueVolumeName,
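The retry gate composes three checks: an unknown operation is safe, a pending one never is, and an exponential-backoff window only blocks a retry of the same named operation. A condensed, self-contained model of that decision follows (hypothetical types; the real code consults its per-operation backoff tracker rather than a timestamp):

package main

import (
	"fmt"
	"time"
)

// op models the fields of a tracked operation that the retry gate inspects.
type op struct {
	name         string
	pending      bool
	backoffUntil time.Time
}

// safeToRetry mirrors the logic of IsOperationSafeToRetry: no record means
// safe; a pending operation is never safe; a live backoff window only blocks
// the same operation name.
func safeToRetry(ops map[string]op, key, operationName string, now time.Time) bool {
	prev, ok := ops[key]
	if !ok {
		return true
	}
	if prev.pending {
		return false
	}
	if now.Before(prev.backoffUntil) && prev.name == operationName {
		return false
	}
	return true
}

func main() {
	now := time.Now()
	ops := map[string]op{
		"vol-1": {name: "volume_detach", backoffUntil: now.Add(30 * time.Second)},
	}
	// A second detach of vol-1 is throttled; a differently named operation is not.
	fmt.Println(safeToRetry(ops, "vol-1", "volume_detach", now)) // false
	fmt.Println(safeToRetry(ops, "vol-1", "volume_attach", now)) // true
	fmt.Println(safeToRetry(ops, "vol-2", "volume_detach", now)) // true
}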
11 vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_executor.go generated vendored
@@ -141,6 +141,9 @@ type OperationExecutor interface {
 	// IsOperationPending returns true if an operation for the given volumeName
 	// and one of podName or nodeName is pending, otherwise it returns false
 	IsOperationPending(volumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName, nodeName types.NodeName) bool
+	// IsOperationSafeToRetry returns false if an operation for the given volumeName
+	// and one of podName or nodeName is pending or in exponential backoff, otherwise it returns true
+	IsOperationSafeToRetry(volumeName v1.UniqueVolumeName, podName volumetypes.UniquePodName, nodeName types.NodeName, operationName string) bool
 	// ExpandInUseVolume will resize volume's file system to expected size without unmounting the volume.
 	ExpandInUseVolume(volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater) error
 	// ReconstructVolumeOperation construct a new volumeSpec and returns it created by plugin
@@ -664,6 +667,14 @@ func (oe *operationExecutor) IsOperationPending(
 	return oe.pendingOperations.IsOperationPending(volumeName, podName, nodeName)
 }
 
+func (oe *operationExecutor) IsOperationSafeToRetry(
+	volumeName v1.UniqueVolumeName,
+	podName volumetypes.UniquePodName,
+	nodeName types.NodeName,
+	operationName string) bool {
+	return oe.pendingOperations.IsOperationSafeToRetry(volumeName, podName, nodeName, operationName)
+}
+
 func (oe *operationExecutor) AttachVolume(
 	volumeToAttach VolumeToAttach,
 	actualStateOfWorld ActualStateOfWorldAttacherUpdater) error {
5 vendor/k8s.io/kubernetes/pkg/volume/util/operationexecutor/operation_generator.go generated vendored
@@ -48,6 +48,7 @@ import (
 const (
 	unknownVolumePlugin           string = "UnknownVolumePlugin"
 	unknownAttachableVolumePlugin string = "UnknownAttachableVolumePlugin"
+	DetachOperationName           string = "volume_detach"
 )
 
 // InTreeToCSITranslator contains methods required to check migratable status
@@ -491,9 +492,9 @@ func (og *operationGenerator) GenerateDetachVolumeFunc(
 	}
 
 	return volumetypes.GeneratedOperations{
-		OperationName:     "volume_detach",
+		OperationName:     DetachOperationName,
 		OperationFunc:     detachVolumeFunc,
-		CompleteFunc:      util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(pluginName, volumeToDetach.VolumeSpec), "volume_detach"),
+		CompleteFunc:      util.OperationCompleteHook(util.GetFullQualifiedPluginNameForVolume(pluginName, volumeToDetach.VolumeSpec), DetachOperationName),
 		EventRecorderFunc: nil, // nil because we do not want to generate event on error
 	}, nil
 }
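Exporting the operation name as a constant removes the duplicated "volume_detach" literal and gives external callers (such as users of IsOperationSafeToRetry above) one authoritative name to pass. The pattern in miniature, with illustrative names:

package main

import "fmt"

// DetachOperationName gives the operation label a single definition instead
// of scattered string literals that could drift apart.
const DetachOperationName = "volume_detach"

// completeHook simulates a completion callback keyed by the operation name.
func completeHook(plugin, operation string) string {
	return fmt.Sprintf("%s %s complete", plugin, operation)
}

func main() {
	// Both the operation record and its completion hook share the constant.
	fmt.Println("OperationName:", DetachOperationName)
	fmt.Println(completeHook("kubernetes.io/csi", DetachOperationName))
}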
26 vendor/k8s.io/kubernetes/pkg/volume/util/subpath/subpath_linux.go generated vendored
@@ -29,7 +29,6 @@ import (
 
 	"golang.org/x/sys/unix"
 	"k8s.io/klog/v2"
-	"k8s.io/kubernetes/pkg/volume/util/hostutil"
 	"k8s.io/mount-utils"
 )
@@ -109,12 +108,12 @@ func prepareSubpathTarget(mounter mount.Interface, subpath Subpath) (bool, strin
 		notMount = true
 	}
 	if !notMount {
-		linuxHostUtil := hostutil.NewHostUtil()
-		mntInfo, err := linuxHostUtil.FindMountInfo(bindPathTarget)
+		// It's already mounted, so check if it's bind-mounted to the same path
+		samePath, err := checkSubPathFileEqual(subpath, bindPathTarget)
 		if err != nil {
-			return false, "", fmt.Errorf("error calling findMountInfo for %s: %s", bindPathTarget, err)
+			return false, "", fmt.Errorf("error checking subpath mount info for %s: %s", bindPathTarget, err)
 		}
-		if mntInfo.Root != subpath.Path {
+		if !samePath {
 			// It's already mounted but not what we want, unmount it
 			if err = mounter.Unmount(bindPathTarget); err != nil {
 				return false, "", fmt.Errorf("error ummounting %s: %s", bindPathTarget, err)
@@ -155,6 +154,23 @@ func prepareSubpathTarget(mounter mount.Interface, subpath Subpath) (bool, strin
 	return false, bindPathTarget, nil
 }
 
+func checkSubPathFileEqual(subpath Subpath, bindMountTarget string) (bool, error) {
+	s, err := os.Lstat(subpath.Path)
+	if err != nil {
+		return false, fmt.Errorf("stat %s failed: %s", subpath.Path, err)
+	}
+
+	t, err := os.Lstat(bindMountTarget)
+	if err != nil {
+		return false, fmt.Errorf("lstat %s failed: %s", bindMountTarget, err)
+	}
+
+	if !os.SameFile(s, t) {
+		return false, nil
+	}
+	return true, nil
+}
+
 func getSubpathBindTarget(subpath Subpath) string {
 	// containerName is DNS label, i.e. safe as a directory name.
 	return filepath.Join(subpath.PodDir, containerSubPathDirectoryName, subpath.VolumeName, subpath.ContainerName, strconv.Itoa(subpath.VolumeMountIndex))
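checkSubPathFileEqual replaces mountinfo parsing with a direct file-identity check: two os.Lstat results compared with os.SameFile (same device and inode on Unix). The standard-library mechanics, demonstrated here on a file and a hard link to it rather than a bind mount:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir, err := os.MkdirTemp("", "samefile")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	src := filepath.Join(dir, "src")
	link := filepath.Join(dir, "link")
	if err := os.WriteFile(src, []byte("data"), 0o644); err != nil {
		panic(err)
	}
	// A hard link shares the inode, much like a bind mount exposes the same file.
	if err := os.Link(src, link); err != nil {
		panic(err)
	}

	s, _ := os.Lstat(src)
	t, _ := os.Lstat(link)
	fmt.Println(os.SameFile(s, t)) // true: same device and inode
}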
10 vendor/k8s.io/mount-utils/mount_helper_windows.go generated vendored
@@ -84,15 +84,9 @@ func NormalizeWindowsPath(path string) string {
 
 // ValidateDiskNumber : disk number should be a number in [0, 99]
 func ValidateDiskNumber(disk string) error {
-	diskNum, err := strconv.Atoi(disk)
-	if err != nil {
-		return fmt.Errorf("wrong disk number format: %q, err:%v", disk, err)
+	if _, err := strconv.Atoi(disk); err != nil {
+		return fmt.Errorf("wrong disk number format: %q, err: %v", disk, err)
 	}
-
-	if diskNum < 0 || diskNum > 99 {
-		return fmt.Errorf("disk number out of range: %q", disk)
-	}
-
 	return nil
 }
 
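The rewrite drops the [0, 99] range check, so any integer disk number now validates; only the numeric-format check survives. That surviving logic in isolation (a standalone rendition of the vendored helper, not the package itself):

package main

import (
	"fmt"
	"strconv"
)

// validateDiskNumber keeps only the format check from the diff: the value
// must parse as an integer, but is no longer capped at 99.
func validateDiskNumber(disk string) error {
	if _, err := strconv.Atoi(disk); err != nil {
		return fmt.Errorf("wrong disk number format: %q, err: %v", disk, err)
	}
	return nil
}

func main() {
	fmt.Println(validateDiskNumber("7"))   // <nil>
	fmt.Println(validateDiskNumber("128")) // <nil> -- rejected before this change
	fmt.Println(validateDiskNumber("x"))   // wrong disk number format ...
}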
110 vendor/modules.txt vendored
@@ -1629,7 +1629,7 @@ gopkg.in/yaml.v3
 # inet.af/tcpproxy v0.0.0-20200125044825-b6bb9b5b8252
 ## explicit
 inet.af/tcpproxy
-# k8s.io/api v0.22.3 => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.22.3-k3s1
+# k8s.io/api v0.22.4 => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.22.4-k3s1
 ## explicit
 k8s.io/api/admission/v1
 k8s.io/api/admission/v1beta1
@@ -1677,7 +1677,7 @@ k8s.io/api/scheduling/v1beta1
 k8s.io/api/storage/v1
 k8s.io/api/storage/v1alpha1
 k8s.io/api/storage/v1beta1
-# k8s.io/apiextensions-apiserver v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.22.3-k3s1
+# k8s.io/apiextensions-apiserver v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.22.4-k3s1
 k8s.io/apiextensions-apiserver/pkg/apihelpers
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install
@@ -1717,7 +1717,7 @@ k8s.io/apiextensions-apiserver/pkg/generated/openapi
 k8s.io/apiextensions-apiserver/pkg/registry/customresource
 k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor
 k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition
-# k8s.io/apimachinery v0.22.3 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.22.3-k3s1
+# k8s.io/apimachinery v0.22.4 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.22.4-k3s1
 ## explicit
 k8s.io/apimachinery/pkg/api/equality
 k8s.io/apimachinery/pkg/api/errors
@@ -1782,7 +1782,7 @@ k8s.io/apimachinery/pkg/watch
 k8s.io/apimachinery/third_party/forked/golang/json
 k8s.io/apimachinery/third_party/forked/golang/netutil
 k8s.io/apimachinery/third_party/forked/golang/reflect
-# k8s.io/apiserver v0.22.3 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.22.3-k3s1
+# k8s.io/apiserver v0.22.4 => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.22.4-k3s1
 ## explicit
 k8s.io/apiserver/pkg/admission
 k8s.io/apiserver/pkg/admission/configuration
@@ -1924,11 +1924,11 @@ k8s.io/apiserver/plugin/pkg/audit/webhook
 k8s.io/apiserver/plugin/pkg/authenticator/token/oidc
 k8s.io/apiserver/plugin/pkg/authenticator/token/webhook
 k8s.io/apiserver/plugin/pkg/authorizer/webhook
-# k8s.io/cli-runtime v0.17.2 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.22.3-k3s1
+# k8s.io/cli-runtime v0.17.2 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.22.4-k3s1
 k8s.io/cli-runtime/pkg/genericclioptions
 k8s.io/cli-runtime/pkg/printers
 k8s.io/cli-runtime/pkg/resource
-# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.22.3-k3s1
+# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.22.4-k3s1
 ## explicit
 k8s.io/client-go/applyconfigurations/admissionregistration/v1
 k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1
@@ -2220,7 +2220,7 @@ k8s.io/client-go/util/jsonpath
 k8s.io/client-go/util/keyutil
 k8s.io/client-go/util/retry
 k8s.io/client-go/util/workqueue
-# k8s.io/cloud-provider v0.22.3 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.22.3-k3s1
+# k8s.io/cloud-provider v0.22.4 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.22.4-k3s1
 ## explicit
 k8s.io/cloud-provider
 k8s.io/cloud-provider/api
@@ -2242,13 +2242,13 @@ k8s.io/cloud-provider/service/helpers
 k8s.io/cloud-provider/volume
 k8s.io/cloud-provider/volume/errors
 k8s.io/cloud-provider/volume/helpers
-# k8s.io/cluster-bootstrap v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.22.3-k3s1
+# k8s.io/cluster-bootstrap v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.22.4-k3s1
 k8s.io/cluster-bootstrap/token/api
 k8s.io/cluster-bootstrap/token/jws
 k8s.io/cluster-bootstrap/token/util
 k8s.io/cluster-bootstrap/util/secrets
 k8s.io/cluster-bootstrap/util/tokens
-# k8s.io/code-generator v0.19.7 => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.22.3-k3s1
+# k8s.io/code-generator v0.19.7 => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.22.4-k3s1
 k8s.io/code-generator/cmd/client-gen/args
 k8s.io/code-generator/cmd/client-gen/generators
 k8s.io/code-generator/cmd/client-gen/generators/fake
@@ -2263,7 +2263,7 @@ k8s.io/code-generator/cmd/lister-gen/args
 k8s.io/code-generator/cmd/lister-gen/generators
 k8s.io/code-generator/pkg/namer
 k8s.io/code-generator/pkg/util
-# k8s.io/component-base v0.22.3 => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.22.3-k3s1
+# k8s.io/component-base v0.22.4 => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.22.4-k3s1
 ## explicit
 k8s.io/component-base/cli/flag
 k8s.io/component-base/cli/globalflag
@@ -2289,7 +2289,7 @@ k8s.io/component-base/term
 k8s.io/component-base/traces
 k8s.io/component-base/version
 k8s.io/component-base/version/verflag
-# k8s.io/component-helpers v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.22.3-k3s1
+# k8s.io/component-helpers v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.22.4-k3s1
 k8s.io/component-helpers/apimachinery/lease
 k8s.io/component-helpers/apps/poddisruptionbudget
 k8s.io/component-helpers/auth/rbac/reconciliation
@@ -2298,7 +2298,7 @@ k8s.io/component-helpers/node/topology
 k8s.io/component-helpers/scheduling/corev1
 k8s.io/component-helpers/scheduling/corev1/nodeaffinity
 k8s.io/component-helpers/storage/volume
-# k8s.io/controller-manager v0.22.3 => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.22.3-k3s1
+# k8s.io/controller-manager v0.22.4 => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.22.4-k3s1
 ## explicit
 k8s.io/controller-manager/app
 k8s.io/controller-manager/config
@@ -2312,11 +2312,11 @@ k8s.io/controller-manager/pkg/informerfactory
 k8s.io/controller-manager/pkg/leadermigration
 k8s.io/controller-manager/pkg/leadermigration/config
 k8s.io/controller-manager/pkg/leadermigration/options
-# k8s.io/cri-api v0.22.3 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.22.3-k3s1
+# k8s.io/cri-api v0.22.4 => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.22.4-k3s1
 ## explicit
 k8s.io/cri-api/pkg/apis
 k8s.io/cri-api/pkg/apis/runtime/v1alpha2
-# k8s.io/csi-translation-lib v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.22.3-k3s1
+# k8s.io/csi-translation-lib v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.22.4-k3s1
 k8s.io/csi-translation-lib
 k8s.io/csi-translation-lib/plugins
 # k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027
@@ -2333,7 +2333,7 @@ k8s.io/klog
 # k8s.io/klog/v2 v2.9.0 => github.com/k3s-io/klog/v2 v2.9.0-k3s2
 ## explicit
 k8s.io/klog/v2
-# k8s.io/kube-aggregator v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.22.3-k3s1
+# k8s.io/kube-aggregator v0.18.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.22.4-k3s1
 k8s.io/kube-aggregator/pkg/apis/apiregistration
 k8s.io/kube-aggregator/pkg/apis/apiregistration/install
 k8s.io/kube-aggregator/pkg/apis/apiregistration/v1
@@ -2361,9 +2361,9 @@ k8s.io/kube-aggregator/pkg/controllers/status
 k8s.io/kube-aggregator/pkg/registry/apiservice
 k8s.io/kube-aggregator/pkg/registry/apiservice/etcd
 k8s.io/kube-aggregator/pkg/registry/apiservice/rest
-# k8s.io/kube-controller-manager v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.22.3-k3s1
+# k8s.io/kube-controller-manager v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.22.4-k3s1
 k8s.io/kube-controller-manager/config/v1alpha1
-# k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e
+# k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c
 k8s.io/kube-openapi/pkg/aggregator
 k8s.io/kube-openapi/pkg/builder
 k8s.io/kube-openapi/pkg/common
@@ -2377,14 +2377,14 @@ k8s.io/kube-openapi/pkg/validation/spec
 k8s.io/kube-openapi/pkg/validation/strfmt
 k8s.io/kube-openapi/pkg/validation/strfmt/bson
 k8s.io/kube-openapi/pkg/validation/validate
-# k8s.io/kube-proxy v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.22.3-k3s1
+# k8s.io/kube-proxy v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.22.4-k3s1
 k8s.io/kube-proxy/config/v1alpha1
-# k8s.io/kube-scheduler v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.22.3-k3s1
+# k8s.io/kube-scheduler v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.22.4-k3s1
 k8s.io/kube-scheduler/config/v1
 k8s.io/kube-scheduler/config/v1beta1
 k8s.io/kube-scheduler/config/v1beta2
 k8s.io/kube-scheduler/extender/v1
-# k8s.io/kubectl v0.22.3 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.22.3-k3s1
+# k8s.io/kubectl v0.22.4 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.22.4-k3s1
 ## explicit
 k8s.io/kubectl/pkg/apps
 k8s.io/kubectl/pkg/cmd
@@ -2462,7 +2462,7 @@ k8s.io/kubectl/pkg/util/storage
 k8s.io/kubectl/pkg/util/templates
 k8s.io/kubectl/pkg/util/term
 k8s.io/kubectl/pkg/validation
-# k8s.io/kubelet v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.22.3-k3s1
+# k8s.io/kubelet v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.22.4-k3s1
 k8s.io/kubelet/config/v1alpha1
 k8s.io/kubelet/config/v1beta1
 k8s.io/kubelet/pkg/apis
@@ -2474,7 +2474,7 @@ k8s.io/kubelet/pkg/apis/pluginregistration/v1
 k8s.io/kubelet/pkg/apis/podresources/v1
 k8s.io/kubelet/pkg/apis/podresources/v1alpha1
 k8s.io/kubelet/pkg/apis/stats/v1alpha1
-# k8s.io/kubernetes v1.22.3 => github.com/k3s-io/kubernetes v1.22.3-k3s1
+# k8s.io/kubernetes v1.22.4 => github.com/k3s-io/kubernetes v1.22.4-k3s1
 ## explicit
 k8s.io/kubernetes/cmd/kube-apiserver/app
 k8s.io/kubernetes/cmd/kube-apiserver/app/options
@@ -3206,7 +3206,7 @@ k8s.io/kubernetes/third_party/forked/gonum/graph
 k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear
 k8s.io/kubernetes/third_party/forked/gonum/graph/simple
 k8s.io/kubernetes/third_party/forked/gonum/graph/traverse
-# k8s.io/legacy-cloud-providers v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.22.3-k3s1
+# k8s.io/legacy-cloud-providers v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.22.4-k3s1
 k8s.io/legacy-cloud-providers/aws
 k8s.io/legacy-cloud-providers/azure
 k8s.io/legacy-cloud-providers/azure/auth
@@ -3249,7 +3249,7 @@ k8s.io/legacy-cloud-providers/openstack
 k8s.io/legacy-cloud-providers/vsphere
 k8s.io/legacy-cloud-providers/vsphere/vclib
 k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers
-# k8s.io/metrics v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.22.3-k3s1
+# k8s.io/metrics v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.22.4-k3s1
 k8s.io/metrics/pkg/apis/custom_metrics
 k8s.io/metrics/pkg/apis/custom_metrics/v1beta1
 k8s.io/metrics/pkg/apis/custom_metrics/v1beta2
@@ -3265,9 +3265,9 @@ k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1
 k8s.io/metrics/pkg/client/custom_metrics
 k8s.io/metrics/pkg/client/custom_metrics/scheme
 k8s.io/metrics/pkg/client/external_metrics
-# k8s.io/mount-utils v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.22.3-k3s1
+# k8s.io/mount-utils v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.22.4-k3s1
 k8s.io/mount-utils
-# k8s.io/pod-security-admission v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/pod-security-admission v1.22.3-k3s1
+# k8s.io/pod-security-admission v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/pod-security-admission v1.22.4-k3s1
 k8s.io/pod-security-admission/admission
 k8s.io/pod-security-admission/admission/api
 k8s.io/pod-security-admission/admission/api/load
@@ -3421,35 +3421,35 @@ sigs.k8s.io/yaml
 # google.golang.org/genproto => google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63
 # google.golang.org/grpc => google.golang.org/grpc v1.38.0
 # gopkg.in/square/go-jose.v2 => gopkg.in/square/go-jose.v2 v2.2.2
-# k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.22.3-k3s1
-# k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.22.3-k3s1
-# k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.22.3-k3s1
-# k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.22.3-k3s1
-# k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.22.3-k3s1
-# k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.22.3-k3s1
-# k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.22.3-k3s1
-# k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.22.3-k3s1
-# k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.22.3-k3s1
-# k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.22.3-k3s1
-# k8s.io/component-helpers => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.22.3-k3s1
-# k8s.io/controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.22.3-k3s1
-# k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.22.3-k3s1
-# k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.22.3-k3s1
+# k8s.io/api => github.com/k3s-io/kubernetes/staging/src/k8s.io/api v1.22.4-k3s1
+# k8s.io/apiextensions-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.22.4-k3s1
+# k8s.io/apimachinery => github.com/k3s-io/kubernetes/staging/src/k8s.io/apimachinery v1.22.4-k3s1
+# k8s.io/apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/apiserver v1.22.4-k3s1
+# k8s.io/cli-runtime => github.com/k3s-io/kubernetes/staging/src/k8s.io/cli-runtime v1.22.4-k3s1
+# k8s.io/client-go => github.com/k3s-io/kubernetes/staging/src/k8s.io/client-go v1.22.4-k3s1
+# k8s.io/cloud-provider => github.com/k3s-io/kubernetes/staging/src/k8s.io/cloud-provider v1.22.4-k3s1
+# k8s.io/cluster-bootstrap => github.com/k3s-io/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.22.4-k3s1
+# k8s.io/code-generator => github.com/k3s-io/kubernetes/staging/src/k8s.io/code-generator v1.22.4-k3s1
+# k8s.io/component-base => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-base v1.22.4-k3s1
+# k8s.io/component-helpers => github.com/k3s-io/kubernetes/staging/src/k8s.io/component-helpers v1.22.4-k3s1
+# k8s.io/controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/controller-manager v1.22.4-k3s1
+# k8s.io/cri-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/cri-api v1.22.4-k3s1
+# k8s.io/csi-translation-lib => github.com/k3s-io/kubernetes/staging/src/k8s.io/csi-translation-lib v1.22.4-k3s1
 # k8s.io/klog => github.com/k3s-io/klog v1.0.0-k3s2
 # k8s.io/klog/v2 => github.com/k3s-io/klog/v2 v2.9.0-k3s2
-# k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.22.3-k3s1
-# k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.22.3-k3s1
-# k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.22.3-k3s1
-# k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.22.3-k3s1
-# k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.22.3-k3s1
-# k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.22.3-k3s1
-# k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.22.3-k3s1
-# k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.22.3-k3s1
-# k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.22.3-k3s1
-# k8s.io/mount-utils => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.22.3-k3s1
-# k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.22.3-k3s1
-# k8s.io/pod-security-admission => github.com/k3s-io/kubernetes/staging/src/k8s.io/pod-security-admission v1.22.3-k3s1
-# k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.22.3-k3s1
-# k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.22.3-k3s1
-# k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.22.3-k3s1
+# k8s.io/kube-aggregator => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-aggregator v1.22.4-k3s1
+# k8s.io/kube-controller-manager => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-controller-manager v1.22.4-k3s1
+# k8s.io/kube-proxy => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-proxy v1.22.4-k3s1
+# k8s.io/kube-scheduler => github.com/k3s-io/kubernetes/staging/src/k8s.io/kube-scheduler v1.22.4-k3s1
+# k8s.io/kubectl => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubectl v1.22.4-k3s1
+# k8s.io/kubelet => github.com/k3s-io/kubernetes/staging/src/k8s.io/kubelet v1.22.4-k3s1
+# k8s.io/kubernetes => github.com/k3s-io/kubernetes v1.22.4-k3s1
+# k8s.io/legacy-cloud-providers => github.com/k3s-io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.22.4-k3s1
+# k8s.io/metrics => github.com/k3s-io/kubernetes/staging/src/k8s.io/metrics v1.22.4-k3s1
+# k8s.io/mount-utils => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.22.4-k3s1
+# k8s.io/node-api => github.com/k3s-io/kubernetes/staging/src/k8s.io/node-api v1.22.4-k3s1
+# k8s.io/pod-security-admission => github.com/k3s-io/kubernetes/staging/src/k8s.io/pod-security-admission v1.22.4-k3s1
+# k8s.io/sample-apiserver => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-apiserver v1.22.4-k3s1
+# k8s.io/sample-cli-plugin => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.22.4-k3s1
+# k8s.io/sample-controller => github.com/k3s-io/kubernetes/staging/src/k8s.io/sample-controller v1.22.4-k3s1
 # mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7