Merge pull request #637 from erikwilson/update-v1.14.4-k3s.1
Update kubernetes to v1.14.4-k3s.1
Commit: 13845df0f3
@@ -312,7 +312,7 @@ import:
 - package: k8s.io/klog
   version: v0.2.0-14-g8e90cee79f8237
 - package: k8s.io/kubernetes
-  version: v1.14.3-k3s.2
+  version: v1.14.4-k3s.1
   repo: https://github.com/rancher/k3s.git
   transitive: true
   staging: true
@@ -9,7 +9,7 @@ package=github.com/opencontainers/runc/libcontainer/nsenter
 package=github.com/opencontainers/runc/libcontainer/specconv
 package=github.com/opencontainers/runc/contrib/cmd/recvtty

-k8s.io/kubernetes v1.14.3-k3s.2 https://github.com/rancher/k3s.git transitive=true,staging=true
+k8s.io/kubernetes v1.14.4-k3s.1 https://github.com/rancher/k3s.git transitive=true,staging=true

 github.com/rancher/wrangler 7737c167e16514a38229bc64c839cee8cd14e6d3
 github.com/rancher/wrangler-api v0.1.4
vendor/k8s.io/apiserver/pkg/server/healthz/doc.go (2 changes, generated, vendored)

@@ -17,5 +17,5 @@ limitations under the License.
 // Package healthz implements basic http server health checking.
 // Usage:
 //   import "k8s.io/apiserver/pkg/server/healthz"
-//   healthz.DefaultHealthz()
+//   healthz.InstallHandler(mux)
 package healthz // import "k8s.io/apiserver/pkg/server/healthz"
vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go (9 changes, generated, vendored)

@@ -37,15 +37,6 @@ type HealthzChecker interface {
 	Check(req *http.Request) error
 }

-var defaultHealthz = sync.Once{}
-
-// DefaultHealthz installs the default healthz check to the http.DefaultServeMux.
-func DefaultHealthz(checks ...HealthzChecker) {
-	defaultHealthz.Do(func() {
-		InstallHandler(http.DefaultServeMux, checks...)
-	})
-}
-
 // PingHealthz returns true automatically when checked
 var PingHealthz HealthzChecker = ping{}
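The removed DefaultHealthz registered checks on http.DefaultServeMux through a sync.Once. As a rough illustration of the checker pattern involved — a standalone toy, not the vendored code; `installHandler` here is a hypothetical stand-in for healthz.InstallHandler:

```go
package main

import (
	"fmt"
	"net/http"
)

// HealthzChecker mirrors the interface shown in the hunk above.
type HealthzChecker interface {
	Check(req *http.Request) error
}

// ping reports healthy unconditionally, like the vendored PingHealthz.
type ping struct{}

func (ping) Check(*http.Request) error { return nil }

// installHandler serves 200 on /healthz only if every checker passes.
func installHandler(mux *http.ServeMux, checks ...HealthzChecker) {
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		for _, c := range checks {
			if err := c.Check(r); err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
		}
		fmt.Fprint(w, "ok")
	})
}

func main() {
	mux := http.NewServeMux()
	installHandler(mux, ping{})
	http.ListenAndServe("127.0.0.1:10249", mux)
}
```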
vendor/k8s.io/client-go/pkg/version/base.go (6 changes, generated, vendored)

@@ -3,8 +3,8 @@ package version
 var (
 	gitMajor = "1"
 	gitMinor = "14"
-	gitVersion = "v1.14.3-k3s.2"
-	gitCommit = "6174f1fed28fd19300038f6578bf48e3920fa7ba"
+	gitVersion = "v1.14.4-k3s.1"
+	gitCommit = "ac992f3ef6c6d71e14366a1f65318e10eb3f4232"
 	gitTreeState = "clean"
-	buildDate = "2019-06-21T08:17+00:00Z"
+	buildDate = "2019-07-14T07:04+00:00Z"
 )
vendor/k8s.io/kubernetes/CHANGELOG-1.14.md (148 changes, generated, vendored)
@@ -1,23 +1,30 @@
<!-- BEGIN MUNGE: GENERATED_TOC -->
- [v1.14.2](#v1142)
  - [Downloads for v1.14.2](#downloads-for-v1142)
- [v1.14.3](#v1143)
  - [Downloads for v1.14.3](#downloads-for-v1143)
    - [Client Binaries](#client-binaries)
    - [Server Binaries](#server-binaries)
    - [Node Binaries](#node-binaries)
  - [Changelog since v1.14.1](#changelog-since-v1141)
  - [Changelog since v1.14.2](#changelog-since-v1142)
    - [Other notable changes](#other-notable-changes)
- [v1.14.1](#v1141)
  - [Downloads for v1.14.1](#downloads-for-v1141)
- [v1.14.2](#v1142)
  - [Downloads for v1.14.2](#downloads-for-v1142)
    - [Client Binaries](#client-binaries-1)
    - [Server Binaries](#server-binaries-1)
    - [Node Binaries](#node-binaries-1)
  - [Changelog since v1.14.0](#changelog-since-v1140)
  - [Changelog since v1.14.1](#changelog-since-v1141)
    - [Other notable changes](#other-notable-changes-1)
- [v1.14.0](#v1140)
  - [Downloads for v1.14.0](#downloads-for-v1140)
- [v1.14.1](#v1141)
  - [Downloads for v1.14.1](#downloads-for-v1141)
    - [Client Binaries](#client-binaries-2)
    - [Server Binaries](#server-binaries-2)
    - [Node Binaries](#node-binaries-2)
  - [Changelog since v1.14.0](#changelog-since-v1140)
    - [Other notable changes](#other-notable-changes-2)
- [v1.14.0](#v1140)
  - [Downloads for v1.14.0](#downloads-for-v1140)
    - [Client Binaries](#client-binaries-3)
    - [Server Binaries](#server-binaries-3)
    - [Node Binaries](#node-binaries-3)
- [Kubernetes v1.14 Release Notes](#kubernetes-v114-release-notes)
  - [1.14 What’s New](#114-whats-new)
  - [Known Issues](#known-issues)
@@ -49,57 +56,124 @@
  - [External Dependencies](#external-dependencies)
- [v1.14.0-rc.1](#v1140-rc1)
  - [Downloads for v1.14.0-rc.1](#downloads-for-v1140-rc1)
    - [Client Binaries](#client-binaries-3)
    - [Server Binaries](#server-binaries-3)
    - [Node Binaries](#node-binaries-3)
  - [Changelog since v1.14.0-beta.2](#changelog-since-v1140-beta2)
    - [Action Required](#action-required)
    - [Other notable changes](#other-notable-changes-2)
- [v1.14.0-beta.2](#v1140-beta2)
  - [Downloads for v1.14.0-beta.2](#downloads-for-v1140-beta2)
    - [Client Binaries](#client-binaries-4)
    - [Server Binaries](#server-binaries-4)
    - [Node Binaries](#node-binaries-4)
  - [Changelog since v1.14.0-beta.1](#changelog-since-v1140-beta1)
    - [Action Required](#action-required-1)
  - [Changelog since v1.14.0-beta.2](#changelog-since-v1140-beta2)
    - [Action Required](#action-required)
    - [Other notable changes](#other-notable-changes-3)
- [v1.14.0-beta.1](#v1140-beta1)
  - [Downloads for v1.14.0-beta.1](#downloads-for-v1140-beta1)
- [v1.14.0-beta.2](#v1140-beta2)
  - [Downloads for v1.14.0-beta.2](#downloads-for-v1140-beta2)
    - [Client Binaries](#client-binaries-5)
    - [Server Binaries](#server-binaries-5)
    - [Node Binaries](#node-binaries-5)
  - [Changelog since v1.14.0-alpha.3](#changelog-since-v1140-alpha3)
    - [Action Required](#action-required-2)
  - [Changelog since v1.14.0-beta.1](#changelog-since-v1140-beta1)
    - [Action Required](#action-required-1)
    - [Other notable changes](#other-notable-changes-4)
- [v1.14.0-alpha.3](#v1140-alpha3)
  - [Downloads for v1.14.0-alpha.3](#downloads-for-v1140-alpha3)
- [v1.14.0-beta.1](#v1140-beta1)
  - [Downloads for v1.14.0-beta.1](#downloads-for-v1140-beta1)
    - [Client Binaries](#client-binaries-6)
    - [Server Binaries](#server-binaries-6)
    - [Node Binaries](#node-binaries-6)
  - [Changelog since v1.14.0-alpha.2](#changelog-since-v1140-alpha2)
    - [Action Required](#action-required-3)
  - [Changelog since v1.14.0-alpha.3](#changelog-since-v1140-alpha3)
    - [Action Required](#action-required-2)
    - [Other notable changes](#other-notable-changes-5)
- [v1.14.0-alpha.2](#v1140-alpha2)
  - [Downloads for v1.14.0-alpha.2](#downloads-for-v1140-alpha2)
- [v1.14.0-alpha.3](#v1140-alpha3)
  - [Downloads for v1.14.0-alpha.3](#downloads-for-v1140-alpha3)
    - [Client Binaries](#client-binaries-7)
    - [Server Binaries](#server-binaries-7)
    - [Node Binaries](#node-binaries-7)
  - [Changelog since v1.14.0-alpha.1](#changelog-since-v1140-alpha1)
    - [Action Required](#action-required-4)
  - [Changelog since v1.14.0-alpha.2](#changelog-since-v1140-alpha2)
    - [Action Required](#action-required-3)
    - [Other notable changes](#other-notable-changes-6)
- [v1.14.0-alpha.1](#v1140-alpha1)
  - [Downloads for v1.14.0-alpha.1](#downloads-for-v1140-alpha1)
- [v1.14.0-alpha.2](#v1140-alpha2)
  - [Downloads for v1.14.0-alpha.2](#downloads-for-v1140-alpha2)
    - [Client Binaries](#client-binaries-8)
    - [Server Binaries](#server-binaries-8)
    - [Node Binaries](#node-binaries-8)
  - [Changelog since v1.14.0-alpha.1](#changelog-since-v1140-alpha1)
    - [Action Required](#action-required-4)
    - [Other notable changes](#other-notable-changes-7)
- [v1.14.0-alpha.1](#v1140-alpha1)
  - [Downloads for v1.14.0-alpha.1](#downloads-for-v1140-alpha1)
    - [Client Binaries](#client-binaries-9)
    - [Server Binaries](#server-binaries-9)
    - [Node Binaries](#node-binaries-9)
  - [Changelog since v1.13.0](#changelog-since-v1130)
    - [Action Required](#action-required-5)
    - [Other notable changes](#other-notable-changes-7)
    - [Other notable changes](#other-notable-changes-8)
<!-- END MUNGE: GENERATED_TOC -->

<!-- NEW RELEASE NOTES ENTRY -->

# v1.14.3

[Documentation](https://docs.k8s.io)

## Downloads for v1.14.3

filename | sha512 hash
-------- | -----------
[kubernetes.tar.gz](https://dl.k8s.io/v1.14.3/kubernetes.tar.gz) | `7e82dc2070b55ca8f4746579a66d9dcd61e750f8c20632f901cbeb8c9c5bc28b7c428531d96375a5491e22397e4da3abaad2b1399250736380c586a3163fc960`
[kubernetes-src.tar.gz](https://dl.k8s.io/v1.14.3/kubernetes-src.tar.gz) | `58db2d138b1a0575dfdc9be1a742788c56626275f588d262ab64840627e4055c68480f3b2b96c0059967e13f53028cc0bae5882f4abf1767bfcea15ebb761c73`

### Client Binaries

filename | sha512 hash
-------- | -----------
[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.14.3/kubernetes-client-darwin-386.tar.gz) | `29ee18e669171002a11b39fe5046577d91472dfb089de95ad23499a8eb686777887c0bcc81a1681b7ad28f35303ed638bfef36ec94fc19caec9982ba94cd9c74`
[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.14.3/kubernetes-client-darwin-amd64.tar.gz) | `2cf1680e9dfae84dbf4d1f10e9142a087ac9be56c9c5233b0cea669f9dae59ca645e23109a5f039c174dea42e035180e9e3b5298affcba4aa980760af16ba2ff`
[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.14.3/kubernetes-client-linux-386.tar.gz) | `39ace58bdb85397b856b52d54e97eb9f489169995f55a55b278c1c923829c0c264113a3251f01b705e53f35a213d420570feff41c130935aca3ce9e0c1bccc5b`
[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.14.3/kubernetes-client-linux-amd64.tar.gz) | `dcb6eeebf618a01f274091998984f8afca567f7a20ffff810bef97e9654af5e4b912f83c97e6ed13dd1b93fcea2c14d7cde4fd55b174f32aa1f2f005baadf2d7`
[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.14.3/kubernetes-client-linux-arm.tar.gz) | `4a7dd3310fa96e842ab3505f7815b0778af621fd639ab8296b4994428d930d63057f073e81ebc29852beaa4f6e55271289342c90d46d1b5a70564c58f1bd61c7`
[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.14.3/kubernetes-client-linux-arm64.tar.gz) | `7f091cff9c63d58623b5b49967b8824e02139a347c9c5eb9c9fa178b3825320614623afc2e0a2414d29b3cb8f70c8ecfbb23c907e351c63312cd4bc4a57d4610`
[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.14.3/kubernetes-client-linux-ppc64le.tar.gz) | `f7f43d71663a19c2526ae8496b76b2e9df2285ae31f94ba054e45704cc94e2e04cc563fdfce6afebd532ecaf2fb2e68e7ed1bd2c39641ac10bdf910cb17df3ca`
[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.14.3/kubernetes-client-linux-s390x.tar.gz) | `23e782ab1fe8e5d30e1cdff8594de4cc4750ab178ae3c71c177c49acf9f35d908bdf8281ecb174a52664b9841ece206ec8140901334ef7836f556fbe66c04572`
[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.14.3/kubernetes-client-windows-386.tar.gz) | `25c05c105996c0d75443e75b0809b7bf7c33d6d8d1d24d2b0b9c2f872838277030870c02c96bcd4ac378c66a7ad59e2165293a6ac6feea12691d68749dfe1c5b`
[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.14.3/kubernetes-client-windows-amd64.tar.gz) | `df0da8deca3a354406a4be3f025bd9b6729f68c4414126bdcf58a649228196de42d8b94227963bece055801cf883a5de3cbb68b940bfdc98c56eab26e3015029`

### Server Binaries

filename | sha512 hash
-------- | -----------
[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.14.3/kubernetes-server-linux-amd64.tar.gz) | `8d7192d1e74a139feca532d9956f6d3dc5289eb5c3eff643fd13d02815ed11a12dca5580e830d69cb142e1b5e99e0ecf8939d9e260888bca184f9e013f2b5948`
[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.14.3/kubernetes-server-linux-arm.tar.gz) | `89c26c83562e32340ab58a0aa835c0b586c6626e811231129645205edfc0bf563aba4125bb932761a27757a0f205ea013fea30210448eac68cd5b25787b60bc8`
[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.14.3/kubernetes-server-linux-arm64.tar.gz) | `f74ef5b4a680167251b93d346c9864436f3906f72d676483c9ce27ea0f42e366eab0f85aebd138b915fae2e893bed579a755be9869bb0fce43c0460cd629738c`
[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.14.3/kubernetes-server-linux-ppc64le.tar.gz) | `6bfc3cbd4604112de5a09fa9c8e35ad4637d1fedc024ef20fafb9ef5adf5f160656a6e7c3298d7f39f741872dc496e5f1b3dc422b5e6f24be7b1d20704e80f6b`
[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.14.3/kubernetes-server-linux-s390x.tar.gz) | `a1355a9db2e34b062b5e335c52e7a5e2b6e55490896bf9ec1f3900b048b73d4c9eeac6fd770efed269a7fbdcdc613ba10fce3ce4d22aa6d86609790b91c321e9`

### Node Binaries

filename | sha512 hash
-------- | -----------
[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.14.3/kubernetes-node-linux-amd64.tar.gz) | `1eb8610f93153b5e274ae61f68f9ccd30bdb5373b6098b03a0f885da4b389bfd2f060208aedab663c03311bcae853feed50070e55497d353b243e781d7cef62a`
[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.14.3/kubernetes-node-linux-arm.tar.gz) | `cedef35f9e84c93d099117a413de39bb5a55e532de1b24dd6c53bcf849ed3100321a0edbd1b895d0a82c0cbdf8d567a44818c9b5d7fd064a168f42a300c834e0`
[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.14.3/kubernetes-node-linux-arm64.tar.gz) | `d1f4152ece68cd3d96ff6632fd2229f7da630d5256fd8edd225ee75a4fc60976ad0778dfc71dee8bafdf6b1373a365a157f17faec00fdf8ce87664ace99998f2`
[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.14.3/kubernetes-node-linux-ppc64le.tar.gz) | `dbfc833c385c46118aac8d651b9053e48ee9f961fee0668756db0a1254d178d7c9d6c160e6dc9c14e643a1831a9d0e989a00a1b78b455b822c3db8ba29f6b223`
[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.14.3/kubernetes-node-linux-s390x.tar.gz) | `d715253ab6d2b6b6478d19f4ee4b69d4ba4ae5f20643c3b908fe6e481900f83440ded286c955aa1b189bf4ea6461a0d9e4348d61205dc45e1ef26f8b2c03326b`
[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.14.3/kubernetes-node-windows-amd64.tar.gz) | `3e111f2121c4d87d9492c8ba13812537cce11bf2c52852715d0ee5a3d475981567a058b0a17de9f65e1ba09cb5e1b4b1a78c530d78850a0edfe610b77a7dca18`

## Changelog since v1.14.2

### Other notable changes

* Fix vSphere SAML token auth when using Zones ([#78180](https://github.com/kubernetes/kubernetes/pull/78180), [@dougm](https://github.com/dougm))
* IPVS: Disable graceful termination for UDP traffic to solve issues with high number of UDP connections (DNS / syslog in particular) ([#77802](https://github.com/kubernetes/kubernetes/pull/77802), [@lbernail](https://github.com/lbernail))
* Fix broken detection of non-root image user ID ([#78261](https://github.com/kubernetes/kubernetes/pull/78261), [@tallclair](https://github.com/tallclair))
* fix azure retry issue when return 2XX with error ([#78298](https://github.com/kubernetes/kubernetes/pull/78298), [@andyzhangx](https://github.com/andyzhangx))
* kubelet: fix fail to close kubelet->API connections on heartbeat failure when bootstrapping or client certificate rotation is disabled ([#78016](https://github.com/kubernetes/kubernetes/pull/78016), [@gaorong](https://github.com/gaorong))
* Active watches of custom resources now terminate properly if the CRD is modified. ([#78029](https://github.com/kubernetes/kubernetes/pull/78029), [@liggitt](https://github.com/liggitt))
* client-go and kubectl no longer write cached discovery files with world-accessible file permissions ([#77874](https://github.com/kubernetes/kubernetes/pull/77874), [@yuchengwu](https://github.com/yuchengwu))
* Fix panic logspam when running kubelet in standalone mode. ([#77888](https://github.com/kubernetes/kubernetes/pull/77888), [@tallclair](https://github.com/tallclair))
* Fixed a bug in the apiserver storage that could cause just-added finalizers to be ignored on an immediately following delete request, leading to premature deletion. ([#77619](https://github.com/kubernetes/kubernetes/pull/77619), [@caesarxuchao](https://github.com/caesarxuchao))
* Fixes a bug where dry-run is not honored for pod/eviction sub-resource. ([#76969](https://github.com/kubernetes/kubernetes/pull/76969), [@apelisse](https://github.com/apelisse))
* fix incorrect prometheus azure metrics ([#77722](https://github.com/kubernetes/kubernetes/pull/77722), [@andyzhangx](https://github.com/andyzhangx))
* fix KUBE_SERVER_PLATFORMS null error when cross compiling kubectl for non-linux platform ([#78059](https://github.com/kubernetes/kubernetes/pull/78059), [@figo](https://github.com/figo))


# v1.14.2

[Documentation](https://docs.k8s.io)
@@ -375,6 +449,7 @@ kubernetes/enhancements: [kep](https://github.com/kubernetes/enhancements/blob/m
- Default RBAC policy no longer grants access to discovery and permission-checking APIs (used by `kubectl auth can-i`) to *unauthenticated* users. Upgraded clusters preserve prior behavior, but cluster administrators wishing to grant unauthenticated users access in new clusters will need to explicitly opt-in to expose the discovery and/or permission-checking APIs:
  - `kubectl create clusterrolebinding anonymous-discovery --clusterrole=system:discovery --group=system:unauthenticated`
  - `kubectl create clusterrolebinding anonymous-access-review --clusterrole=system:basic-user --group=system:unauthenticated`
  ([#73807](https://github.com/kubernetes/kubernetes/pull/73807), [@dekkagaijin](https://github.com/dekkagaijin))
- The deprecated --storage-versions flag has been removed. The storage versions will always be the default value built-in the kube-apiserver binary. ([#67678](https://github.com/kubernetes/kubernetes/pull/67678), [@caesarxuchao](https://github.com/caesarxuchao))
- The deprecated `--repair-malformed-updates` flag has been removed ([#73663](https://github.com/kubernetes/kubernetes/pull/73663), [@danielqsj](https://github.com/danielqsj))
- The `/swaggerapi/*` schema docs, deprecated since 1.7, have been removed in favor of the /openapi/v2 schema docs. ([#72924](https://github.com/kubernetes/kubernetes/pull/72924), [@liggitt](https://github.com/liggitt))
@@ -966,12 +1041,7 @@ filename | sha512 hash
* While this is a backwards-incompatible change, it would have been impossible to setup reliable monitoring around these metrics since the labels were not stable.
* Add a configuration field to shorten the timeout of validating/mutating admission webhook call. The timeout value must be between 1 and 30 seconds. Default to 30 seconds when unspecified. ([#74562](https://github.com/kubernetes/kubernetes/pull/74562), [@roycaihw](https://github.com/roycaihw))
* client-go: PortForwarder.GetPorts() now contain correct local port if no local port was initially specified when setting up the port forwarder ([#73676](https://github.com/kubernetes/kubernetes/pull/73676), [@martin-helmich](https://github.com/martin-helmich))
* # Apply resources from a directory containing kustomization.yaml ([#74140](https://github.com/kubernetes/kubernetes/pull/74140), [@Liujingfang1](https://github.com/Liujingfang1))
*   kubectl apply -k dir
* # Delete resources from a directory containing kustomization.yaml.
*   kubectl delete -k dir
* # List resources from a directory containing kustomization.yaml
*   kubectl get -k dir
* The examples in kubectl apply/get/delete are updated to support `-k` which uses a `kustomization.yaml` file. ([#74140](https://github.com/kubernetes/kubernetes/pull/74140), [@Liujingfang1](https://github.com/Liujingfang1))
* kubeadm: Allow to download certificate secrets uploaded by `init` or `upload-certs` phase, allowing to transfer certificate secrets (certificates and keys) from the cluster to other master machines when creating HA deployments. ([#74168](https://github.com/kubernetes/kubernetes/pull/74168), [@ereslibre](https://github.com/ereslibre))
* Fixes an issue with missing apiVersion/kind in object data sent to admission webhooks ([#74448](https://github.com/kubernetes/kubernetes/pull/74448), [@liggitt](https://github.com/liggitt))
* client-go: the deprecated versionless API group accessors (like `clientset.Apps()` have been removed). Use an explicit version instead (like `clientset.AppsV1()`) ([#74422](https://github.com/kubernetes/kubernetes/pull/74422), [@liggitt](https://github.com/liggitt))
vendor/k8s.io/kubernetes/cmd/kube-controller-manager/app/core.go (1 change, generated, vendored)

@@ -159,6 +159,7 @@ func startAttachDetachController(ctx ControllerContext) (http.Handler, bool, err
 		ctx.InformerFactory.Core().V1().PersistentVolumeClaims(),
 		ctx.InformerFactory.Core().V1().PersistentVolumes(),
 		ctx.InformerFactory.Storage().V1beta1().CSINodes(),
+		ctx.InformerFactory.Storage().V1beta1().CSIDrivers(),
 		ProbeAttachableVolumePlugins(),
 		GetDynamicPluginProber(ctx.ComponentConfig.PersistentVolumeBinderController.VolumeConfiguration),
 		ctx.ComponentConfig.AttachDetachController.DisableAttachDetachReconcilerSync,
vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go (5 changes, generated, vendored)

@@ -674,9 +674,10 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan
 	}

 	if s.HealthzPort > 0 {
-		healthz.DefaultHealthz()
+		mux := http.NewServeMux()
+		healthz.InstallHandler(mux)
 		go wait.Until(func() {
-			err := http.ListenAndServe(net.JoinHostPort(s.HealthzBindAddress, strconv.Itoa(int(s.HealthzPort))), nil)
+			err := http.ListenAndServe(net.JoinHostPort(s.HealthzBindAddress, strconv.Itoa(int(s.HealthzPort))), mux)
 			if err != nil {
 				klog.Errorf("Starting health server failed: %v", err)
 			}
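The kubelet change above swaps http.DefaultServeMux (reached through ListenAndServe's nil handler) for a private mux, so handlers that other packages register globally — pprof, expvar, and the like — are no longer exposed on the healthz port. A minimal standalone sketch of the difference, with a made-up address:

```go
package main

import (
	"fmt"
	"net"
	"net/http"
	_ "net/http/pprof" // side effect: registers /debug/pprof/* on http.DefaultServeMux
	"strconv"
)

func main() {
	// A private mux serves only what we put on it; the pprof handlers
	// registered above on the default mux stay unreachable here.
	mux := http.NewServeMux()
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "ok")
	})
	addr := net.JoinHostPort("127.0.0.1", strconv.Itoa(10248))
	// Passing nil instead of mux would fall back to http.DefaultServeMux.
	if err := http.ListenAndServe(addr, mux); err != nil {
		fmt.Println("health server failed:", err)
	}
}
```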
vendor/k8s.io/kubernetes/pkg/api/pod/util.go (16 changes, generated, vendored)

@@ -406,12 +406,22 @@ func dropDisabledProcMountField(podSpec, oldPodSpec *api.PodSpec) {
 		defaultProcMount := api.DefaultProcMount
 		for i := range podSpec.Containers {
 			if podSpec.Containers[i].SecurityContext != nil {
-				podSpec.Containers[i].SecurityContext.ProcMount = &defaultProcMount
+				if podSpec.Containers[i].SecurityContext.ProcMount != nil {
+					// The ProcMount field was improperly forced to non-nil in 1.12.
+					// If the feature is disabled, and the existing object is not using any non-default values, and the ProcMount field is present in the incoming object, force to the default value.
+					// Note: we cannot force the field to nil when the feature is disabled because it causes a diff against previously persisted data.
+					podSpec.Containers[i].SecurityContext.ProcMount = &defaultProcMount
+				}
 			}
 		}
 		for i := range podSpec.InitContainers {
 			if podSpec.InitContainers[i].SecurityContext != nil {
-				podSpec.InitContainers[i].SecurityContext.ProcMount = &defaultProcMount
+				if podSpec.InitContainers[i].SecurityContext.ProcMount != nil {
+					// The ProcMount field was improperly forced to non-nil in 1.12.
+					// If the feature is disabled, and the existing object is not using any non-default values, and the ProcMount field is present in the incoming object, force to the default value.
+					// Note: we cannot force the field to nil when the feature is disabled because it causes a diff against previously persisted data.
+					podSpec.InitContainers[i].SecurityContext.ProcMount = &defaultProcMount
+				}
 			}
 		}
 	}
@@ -473,7 +483,7 @@ func runtimeClassInUse(podSpec *api.PodSpec) bool {
 	return false
 }

-// procMountInUse returns true if the pod spec is non-nil and has a SecurityContext's ProcMount field set
+// procMountInUse returns true if the pod spec is non-nil and has a SecurityContext's ProcMount field set to a non-default value
 func procMountInUse(podSpec *api.PodSpec) bool {
 	if podSpec == nil {
 		return false
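The guard above only rewrites ProcMount when the incoming object actually carries the field, so a nil stays nil and stored objects do not pick up a spurious diff. A toy model of that pattern (hypothetical types, not the Kubernetes API):

```go
package main

import "fmt"

// SecurityContext is a stand-in for the real API type.
type SecurityContext struct {
	ProcMount *string
}

const defaultProcMount = "Default"

// dropDisabledProcMount forces only an already-present field back to the
// default; a nil field is left alone to avoid a diff against persisted data.
func dropDisabledProcMount(sc *SecurityContext) {
	if sc != nil && sc.ProcMount != nil {
		d := defaultProcMount
		sc.ProcMount = &d
	}
}

func main() {
	unmasked := "Unmasked"
	a := &SecurityContext{ProcMount: &unmasked}
	b := &SecurityContext{} // ProcMount unset
	dropDisabledProcMount(a)
	dropDisabledProcMount(b)
	fmt.Println(*a.ProcMount, b.ProcMount == nil) // Default true
}
```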
vendor/k8s.io/kubernetes/pkg/apis/apps/v1/zz_generated.defaults.go (24 changes, generated, vendored)

@@ -136,9 +136,6 @@ func SetObjectDefaults_DaemonSet(in *v1.DaemonSet) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			corev1.SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 	for i := range in.Spec.Template.Spec.Containers {
 		a := &in.Spec.Template.Spec.Containers[i]
@@ -181,9 +178,6 @@ func SetObjectDefaults_DaemonSet(in *v1.DaemonSet) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			corev1.SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 }

@@ -289,9 +283,6 @@ func SetObjectDefaults_Deployment(in *v1.Deployment) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			corev1.SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 	for i := range in.Spec.Template.Spec.Containers {
 		a := &in.Spec.Template.Spec.Containers[i]
@@ -334,9 +325,6 @@ func SetObjectDefaults_Deployment(in *v1.Deployment) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			corev1.SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 }

@@ -442,9 +430,6 @@ func SetObjectDefaults_ReplicaSet(in *v1.ReplicaSet) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			corev1.SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 	for i := range in.Spec.Template.Spec.Containers {
 		a := &in.Spec.Template.Spec.Containers[i]
@@ -487,9 +472,6 @@ func SetObjectDefaults_ReplicaSet(in *v1.ReplicaSet) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			corev1.SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 }

@@ -595,9 +577,6 @@ func SetObjectDefaults_StatefulSet(in *v1.StatefulSet) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			corev1.SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 	for i := range in.Spec.Template.Spec.Containers {
 		a := &in.Spec.Template.Spec.Containers[i]
@@ -640,9 +619,6 @@ func SetObjectDefaults_StatefulSet(in *v1.StatefulSet) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			corev1.SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 	for i := range in.Spec.VolumeClaimTemplates {
 		a := &in.Spec.VolumeClaimTemplates[i]
vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta1/zz_generated.defaults.go (12 changes, generated, vendored)

@@ -132,9 +132,6 @@ func SetObjectDefaults_Deployment(in *v1beta1.Deployment) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			v1.SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 	for i := range in.Spec.Template.Spec.Containers {
 		a := &in.Spec.Template.Spec.Containers[i]
@@ -177,9 +174,6 @@ func SetObjectDefaults_Deployment(in *v1beta1.Deployment) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			v1.SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 }

@@ -285,9 +279,6 @@ func SetObjectDefaults_StatefulSet(in *v1beta1.StatefulSet) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			v1.SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 	for i := range in.Spec.Template.Spec.Containers {
 		a := &in.Spec.Template.Spec.Containers[i]
@@ -330,9 +321,6 @@ func SetObjectDefaults_StatefulSet(in *v1beta1.StatefulSet) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			v1.SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 	for i := range in.Spec.VolumeClaimTemplates {
 		a := &in.Spec.VolumeClaimTemplates[i]
vendor/k8s.io/kubernetes/pkg/apis/apps/v1beta2/zz_generated.defaults.go (24 changes, generated, vendored)

@@ -136,9 +136,6 @@ func SetObjectDefaults_DaemonSet(in *v1beta2.DaemonSet) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			v1.SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 	for i := range in.Spec.Template.Spec.Containers {
 		a := &in.Spec.Template.Spec.Containers[i]
@@ -181,9 +178,6 @@ func SetObjectDefaults_DaemonSet(in *v1beta2.DaemonSet) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			v1.SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 }

@@ -289,9 +283,6 @@ func SetObjectDefaults_Deployment(in *v1beta2.Deployment) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			v1.SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 	for i := range in.Spec.Template.Spec.Containers {
 		a := &in.Spec.Template.Spec.Containers[i]
@@ -334,9 +325,6 @@ func SetObjectDefaults_Deployment(in *v1beta2.Deployment) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			v1.SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 }

@@ -442,9 +430,6 @@ func SetObjectDefaults_ReplicaSet(in *v1beta2.ReplicaSet) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			v1.SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 	for i := range in.Spec.Template.Spec.Containers {
 		a := &in.Spec.Template.Spec.Containers[i]
@@ -487,9 +472,6 @@ func SetObjectDefaults_ReplicaSet(in *v1beta2.ReplicaSet) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			v1.SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 }

@@ -595,9 +577,6 @@ func SetObjectDefaults_StatefulSet(in *v1beta2.StatefulSet) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			v1.SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 	for i := range in.Spec.Template.Spec.Containers {
 		a := &in.Spec.Template.Spec.Containers[i]
@@ -640,9 +619,6 @@ func SetObjectDefaults_StatefulSet(in *v1beta2.StatefulSet) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			v1.SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 	for i := range in.Spec.VolumeClaimTemplates {
 		a := &in.Spec.VolumeClaimTemplates[i]
vendor/k8s.io/kubernetes/pkg/apis/batch/v1/zz_generated.defaults.go (6 changes, generated, vendored)

@@ -130,9 +130,6 @@ func SetObjectDefaults_Job(in *v1.Job) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			corev1.SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 	for i := range in.Spec.Template.Spec.Containers {
 		a := &in.Spec.Template.Spec.Containers[i]
@@ -175,9 +172,6 @@ func SetObjectDefaults_Job(in *v1.Job) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			corev1.SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 }

vendor/k8s.io/kubernetes/pkg/apis/batch/v1beta1/zz_generated.defaults.go (12 changes, generated, vendored)

@@ -131,9 +131,6 @@ func SetObjectDefaults_CronJob(in *v1beta1.CronJob) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			v1.SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 	for i := range in.Spec.JobTemplate.Spec.Template.Spec.Containers {
 		a := &in.Spec.JobTemplate.Spec.Template.Spec.Containers[i]
@@ -176,9 +173,6 @@ func SetObjectDefaults_CronJob(in *v1beta1.CronJob) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			v1.SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 }

@@ -283,9 +277,6 @@ func SetObjectDefaults_JobTemplate(in *v1beta1.JobTemplate) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			v1.SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 	for i := range in.Template.Spec.Template.Spec.Containers {
 		a := &in.Template.Spec.Template.Spec.Containers[i]
@@ -328,8 +319,5 @@ func SetObjectDefaults_JobTemplate(in *v1beta1.JobTemplate) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			v1.SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 }
vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/zz_generated.defaults.go (12 changes, generated, vendored)

@@ -131,9 +131,6 @@ func SetObjectDefaults_CronJob(in *v2alpha1.CronJob) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			v1.SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 	for i := range in.Spec.JobTemplate.Spec.Template.Spec.Containers {
 		a := &in.Spec.JobTemplate.Spec.Template.Spec.Containers[i]
@@ -176,9 +173,6 @@ func SetObjectDefaults_CronJob(in *v2alpha1.CronJob) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			v1.SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 }

@@ -283,9 +277,6 @@ func SetObjectDefaults_JobTemplate(in *v2alpha1.JobTemplate) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			v1.SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 	for i := range in.Template.Spec.Template.Spec.Containers {
 		a := &in.Template.Spec.Template.Spec.Containers[i]
@@ -328,8 +319,5 @@ func SetObjectDefaults_JobTemplate(in *v2alpha1.JobTemplate) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			v1.SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 }
vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go (7 changes, generated, vendored)

@@ -421,10 +421,3 @@ func SetDefaults_HostPathVolumeSource(obj *v1.HostPathVolumeSource) {
 		obj.Type = &typeVol
 	}
 }
-
-func SetDefaults_SecurityContext(obj *v1.SecurityContext) {
-	if obj.ProcMount == nil {
-		defProcMount := v1.DefaultProcMount
-		obj.ProcMount = &defProcMount
-	}
-}
vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.defaults.go (18 changes, generated, vendored)

@@ -263,9 +263,6 @@ func SetObjectDefaults_Pod(in *v1.Pod) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 	for i := range in.Spec.Containers {
 		a := &in.Spec.Containers[i]
@@ -308,9 +305,6 @@ func SetObjectDefaults_Pod(in *v1.Pod) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 }

@@ -415,9 +409,6 @@ func SetObjectDefaults_PodTemplate(in *v1.PodTemplate) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 	for i := range in.Template.Spec.Containers {
 		a := &in.Template.Spec.Containers[i]
@@ -460,9 +451,6 @@ func SetObjectDefaults_PodTemplate(in *v1.PodTemplate) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 }

@@ -569,9 +557,6 @@ func SetObjectDefaults_ReplicationController(in *v1.ReplicationController) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 	for i := range in.Spec.Template.Spec.Containers {
 		a := &in.Spec.Template.Spec.Containers[i]
@@ -614,9 +599,6 @@ func SetObjectDefaults_ReplicationController(in *v1.ReplicationController) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			SetDefaults_SecurityContext(a.SecurityContext)
-		}
 		}
 	}
 }
vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/zz_generated.defaults.go (18 changes, generated, vendored)

@@ -138,9 +138,6 @@ func SetObjectDefaults_DaemonSet(in *v1beta1.DaemonSet) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			v1.SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 	for i := range in.Spec.Template.Spec.Containers {
 		a := &in.Spec.Template.Spec.Containers[i]
@@ -183,9 +180,6 @@ func SetObjectDefaults_DaemonSet(in *v1beta1.DaemonSet) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			v1.SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 }

@@ -291,9 +285,6 @@ func SetObjectDefaults_Deployment(in *v1beta1.Deployment) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			v1.SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 	for i := range in.Spec.Template.Spec.Containers {
 		a := &in.Spec.Template.Spec.Containers[i]
@@ -336,9 +327,6 @@ func SetObjectDefaults_Deployment(in *v1beta1.Deployment) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			v1.SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 }

@@ -466,9 +454,6 @@ func SetObjectDefaults_ReplicaSet(in *v1beta1.ReplicaSet) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			v1.SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 	for i := range in.Spec.Template.Spec.Containers {
 		a := &in.Spec.Template.Spec.Containers[i]
@@ -511,9 +496,6 @@ func SetObjectDefaults_ReplicaSet(in *v1beta1.ReplicaSet) {
 				}
 			}
 		}
-		if a.SecurityContext != nil {
-			v1.SetDefaults_SecurityContext(a.SecurityContext)
-		}
 	}
 }

vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal.go (27 changes, generated, vendored)

@@ -23,7 +23,7 @@ import (

 	autoscalingv1 "k8s.io/api/autoscaling/v1"
 	autoscalingv2 "k8s.io/api/autoscaling/v2beta2"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	apiequality "k8s.io/apimachinery/pkg/api/equality"
 	"k8s.io/apimachinery/pkg/api/errors"
 	apimeta "k8s.io/apimachinery/pkg/api/meta"
@@ -235,7 +235,8 @@ func (a *HorizontalController) processNextWorkItem() bool {
 func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.HorizontalPodAutoscaler, scale *autoscalingv1.Scale,
 	metricSpecs []autoscalingv2.MetricSpec) (replicas int32, metric string, statuses []autoscalingv2.MetricStatus, timestamp time.Time, err error) {

-	currentReplicas := scale.Status.Replicas
+	specReplicas := scale.Spec.Replicas
+	statusReplicas := scale.Status.Replicas

 	statuses = make([]autoscalingv2.MetricStatus, len(metricSpecs))

@@ -267,7 +268,7 @@ func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.Hori
 				setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetObjectMetric", "the HPA was unable to compute the replica count: %v", err)
 				return 0, "", nil, time.Time{}, fmt.Errorf("failed to get object metric value: %v", err)
 			}
-			replicaCountProposal, timestampProposal, metricNameProposal, err = a.computeStatusForObjectMetric(currentReplicas, metricSpec, hpa, selector, &statuses[i], metricSelector)
+			replicaCountProposal, timestampProposal, metricNameProposal, err = a.computeStatusForObjectMetric(specReplicas, statusReplicas, metricSpec, hpa, selector, &statuses[i], metricSelector)
 			if err != nil {
 				return 0, "", nil, time.Time{}, fmt.Errorf("failed to get object metric value: %v", err)
 			}
@@ -278,17 +279,17 @@ func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.Hori
 				setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetPodsMetric", "the HPA was unable to compute the replica count: %v", err)
 				return 0, "", nil, time.Time{}, fmt.Errorf("failed to get pods metric value: %v", err)
 			}
-			replicaCountProposal, timestampProposal, metricNameProposal, err = a.computeStatusForPodsMetric(currentReplicas, metricSpec, hpa, selector, &statuses[i], metricSelector)
+			replicaCountProposal, timestampProposal, metricNameProposal, err = a.computeStatusForPodsMetric(specReplicas, metricSpec, hpa, selector, &statuses[i], metricSelector)
 			if err != nil {
 				return 0, "", nil, time.Time{}, fmt.Errorf("failed to get object metric value: %v", err)
 			}
 		case autoscalingv2.ResourceMetricSourceType:
-			replicaCountProposal, timestampProposal, metricNameProposal, err = a.computeStatusForResourceMetric(currentReplicas, metricSpec, hpa, selector, &statuses[i])
+			replicaCountProposal, timestampProposal, metricNameProposal, err = a.computeStatusForResourceMetric(specReplicas, metricSpec, hpa, selector, &statuses[i])
 			if err != nil {
 				return 0, "", nil, time.Time{}, err
 			}
 		case autoscalingv2.ExternalMetricSourceType:
-			replicaCountProposal, timestampProposal, metricNameProposal, err = a.computeStatusForExternalMetric(currentReplicas, metricSpec, hpa, selector, &statuses[i])
+			replicaCountProposal, timestampProposal, metricNameProposal, err = a.computeStatusForExternalMetric(specReplicas, statusReplicas, metricSpec, hpa, selector, &statuses[i])
 			if err != nil {
 				return 0, "", nil, time.Time{}, err
 			}
@@ -326,9 +327,9 @@ func (a *HorizontalController) reconcileKey(key string) (deleted bool, err error
 }

 // computeStatusForObjectMetric computes the desired number of replicas for the specified metric of type ObjectMetricSourceType.
-func (a *HorizontalController) computeStatusForObjectMetric(currentReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler, selector labels.Selector, status *autoscalingv2.MetricStatus, metricSelector labels.Selector) (int32, time.Time, string, error) {
+func (a *HorizontalController) computeStatusForObjectMetric(specReplicas, statusReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler, selector labels.Selector, status *autoscalingv2.MetricStatus, metricSelector labels.Selector) (int32, time.Time, string, error) {
 	if metricSpec.Object.Target.Type == autoscalingv2.ValueMetricType {
-		replicaCountProposal, utilizationProposal, timestampProposal, err := a.replicaCalc.GetObjectMetricReplicas(currentReplicas, metricSpec.Object.Target.Value.MilliValue(), metricSpec.Object.Metric.Name, hpa.Namespace, &metricSpec.Object.DescribedObject, selector, metricSelector)
+		replicaCountProposal, utilizationProposal, timestampProposal, err := a.replicaCalc.GetObjectMetricReplicas(specReplicas, metricSpec.Object.Target.Value.MilliValue(), metricSpec.Object.Metric.Name, hpa.Namespace, &metricSpec.Object.DescribedObject, selector, metricSelector)
 		if err != nil {
 			a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetObjectMetric", err.Error())
 			setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetObjectMetric", "the HPA was unable to compute the replica count: %v", err)
@@ -349,7 +350,7 @@ func (a *HorizontalController) computeStatusForObjectMetric(currentReplicas int3
 		}
 		return replicaCountProposal, timestampProposal, fmt.Sprintf("%s metric %s", metricSpec.Object.DescribedObject.Kind, metricSpec.Object.Metric.Name), nil
 	} else if metricSpec.Object.Target.Type == autoscalingv2.AverageValueMetricType {
-		replicaCountProposal, utilizationProposal, timestampProposal, err := a.replicaCalc.GetObjectPerPodMetricReplicas(currentReplicas, metricSpec.Object.Target.AverageValue.MilliValue(), metricSpec.Object.Metric.Name, hpa.Namespace, &metricSpec.Object.DescribedObject, metricSelector)
+		replicaCountProposal, utilizationProposal, timestampProposal, err := a.replicaCalc.GetObjectPerPodMetricReplicas(statusReplicas, metricSpec.Object.Target.AverageValue.MilliValue(), metricSpec.Object.Metric.Name, hpa.Namespace, &metricSpec.Object.DescribedObject, metricSelector)
 		if err != nil {
 			a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetObjectMetric", err.Error())
 			setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetObjectMetric", "the HPA was unable to compute the replica count: %v", err)
@@ -452,9 +453,9 @@ func (a *HorizontalController) computeStatusForResourceMetric(currentReplicas in
 }

 // computeStatusForExternalMetric computes the desired number of replicas for the specified metric of type ExternalMetricSourceType.
-func (a *HorizontalController) computeStatusForExternalMetric(currentReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler, selector labels.Selector, status *autoscalingv2.MetricStatus) (int32, time.Time, string, error) {
+func (a *HorizontalController) computeStatusForExternalMetric(specReplicas, statusReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler, selector labels.Selector, status *autoscalingv2.MetricStatus) (int32, time.Time, string, error) {
 	if metricSpec.External.Target.AverageValue != nil {
-		replicaCountProposal, utilizationProposal, timestampProposal, err := a.replicaCalc.GetExternalPerPodMetricReplicas(currentReplicas, metricSpec.External.Target.AverageValue.MilliValue(), metricSpec.External.Metric.Name, hpa.Namespace, metricSpec.External.Metric.Selector)
+		replicaCountProposal, utilizationProposal, timestampProposal, err := a.replicaCalc.GetExternalPerPodMetricReplicas(statusReplicas, metricSpec.External.Target.AverageValue.MilliValue(), metricSpec.External.Metric.Name, hpa.Namespace, metricSpec.External.Metric.Selector)
 		if err != nil {
 			a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetExternalMetric", err.Error())
 			setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetExternalMetric", "the HPA was unable to compute the replica count: %v", err)
@@ -475,7 +476,7 @@ func (a *HorizontalController) computeStatusForExternalMetric(currentReplicas in
 		return replicaCountProposal, timestampProposal, fmt.Sprintf("external metric %s(%+v)", metricSpec.External.Metric.Name, metricSpec.External.Metric.Selector), nil
 	}
 	if metricSpec.External.Target.Value != nil {
-		replicaCountProposal, utilizationProposal, timestampProposal, err := a.replicaCalc.GetExternalMetricReplicas(currentReplicas, metricSpec.External.Target.Value.MilliValue(), metricSpec.External.Metric.Name, hpa.Namespace, metricSpec.External.Metric.Selector, selector)
+		replicaCountProposal, utilizationProposal, timestampProposal, err := a.replicaCalc.GetExternalMetricReplicas(specReplicas, metricSpec.External.Target.Value.MilliValue(), metricSpec.External.Metric.Name, hpa.Namespace, metricSpec.External.Metric.Selector, selector)
 		if err != nil {
 			a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetExternalMetric", err.Error())
 			setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetExternalMetric", "the HPA was unable to compute the replica count: %v", err)
@@ -550,7 +551,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.Ho
 		return fmt.Errorf("failed to query scale subresource for %s: %v", reference, err)
 	}
 	setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "SucceededGetScale", "the HPA controller was able to get the target's current scale")
-	currentReplicas := scale.Status.Replicas
+	currentReplicas := scale.Spec.Replicas
 	a.recordInitialRecommendation(currentReplicas, key)

 	var metricStatuses []autoscalingv2.MetricStatus
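As I read the hunks above: absolute-value targets now scale relative to scale.Spec.Replicas (the declared size), while per-pod average targets divide by scale.Status.Replicas (the pods actually observed). A compact restatement of that split — a hypothetical helper, not controller code:

```go
package main

import "fmt"

// baseReplicas picks which replica count a metric is compared against,
// following the spec-vs-status split visible in the diff above.
func baseReplicas(specReplicas, statusReplicas int32, perPodAverage bool) int32 {
	if perPodAverage {
		// AverageValue targets divide the metric across running pods.
		return statusReplicas
	}
	// Value (ratio-style) targets scale relative to the declared size.
	return specReplicas
}

func main() {
	fmt.Println(baseReplicas(5, 3, true))  // 3
	fmt.Println(baseReplicas(5, 3, false)) // 5
}
```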
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/replica_calculator.go (8 changes, generated, vendored)

@@ -22,7 +22,7 @@ import (
 	"time"

 	autoscaling "k8s.io/api/autoscaling/v2beta2"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/sets"
@@ -333,7 +333,7 @@ func (c *ReplicaCalculator) GetExternalMetricReplicas(currentReplicas int32, tar
 // GetExternalPerPodMetricReplicas calculates the desired replica count based on a
 // target metric value per pod (as a milli-value) for the external metric in the
 // given namespace, and the current replica count.
-func (c *ReplicaCalculator) GetExternalPerPodMetricReplicas(currentReplicas int32, targetUtilizationPerPod int64, metricName, namespace string, metricSelector *metav1.LabelSelector) (replicaCount int32, utilization int64, timestamp time.Time, err error) {
+func (c *ReplicaCalculator) GetExternalPerPodMetricReplicas(statusReplicas int32, targetUtilizationPerPod int64, metricName, namespace string, metricSelector *metav1.LabelSelector) (replicaCount int32, utilization int64, timestamp time.Time, err error) {
 	metricLabelSelector, err := metav1.LabelSelectorAsSelector(metricSelector)
 	if err != nil {
 		return 0, 0, time.Time{}, err
@@ -347,13 +347,13 @@ func (c *ReplicaCalculator) GetExternalPerPodMetricReplicas(currentReplicas int3
 		utilization = utilization + val
 	}

-	replicaCount = currentReplicas
+	replicaCount = statusReplicas
 	usageRatio := float64(utilization) / (float64(targetUtilizationPerPod) * float64(replicaCount))
 	if math.Abs(1.0-usageRatio) > c.tolerance {
 		// update number of replicas if the change is large enough
 		replicaCount = int32(math.Ceil(float64(utilization) / float64(targetUtilizationPerPod)))
 	}
 	utilization = int64(math.Ceil(float64(utilization) / float64(currentReplicas)))
+	utilization = int64(math.Ceil(float64(utilization) / float64(statusReplicas)))
 	return replicaCount, utilization, timestamp, nil
 }

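The arithmetic in GetExternalPerPodMetricReplicas is easy to check by hand: with 3 running pods, a 100m-per-pod target, and a total metric of 450m, the usage ratio is 450 / (100 * 3) = 1.5; that is outside a 10% tolerance, so the proposal becomes ceil(450 / 100) = 5 replicas. A standalone sketch of that math (toy code, constants invented):

```go
package main

import (
	"fmt"
	"math"
)

// perPodReplicas mirrors the shape of the calculation above.
func perPodReplicas(statusReplicas int32, utilization, targetPerPod int64, tolerance float64) int32 {
	replicaCount := statusReplicas
	usageRatio := float64(utilization) / (float64(targetPerPod) * float64(replicaCount))
	if math.Abs(1.0-usageRatio) > tolerance {
		// update the replica count only when the change is large enough
		replicaCount = int32(math.Ceil(float64(utilization) / float64(targetPerPod)))
	}
	return replicaCount
}

func main() {
	fmt.Println(perPodReplicas(3, 450, 100, 0.1)) // 5
}
```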
vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/attach_detach_controller.go (25 changes, generated, vendored)

@@ -30,6 +30,7 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	coreinformers "k8s.io/client-go/informers/core/v1"
 	storageinformers "k8s.io/client-go/informers/storage/v1beta1"
 	clientset "k8s.io/client-go/kubernetes"
@@ -48,6 +49,7 @@ import (
 	"k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler"
 	"k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater"
 	"k8s.io/kubernetes/pkg/controller/volume/attachdetach/util"
+	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/util/mount"
 	"k8s.io/kubernetes/pkg/volume"
 	volumeutil "k8s.io/kubernetes/pkg/volume/util"
@@ -103,6 +105,7 @@ func NewAttachDetachController(
 	pvcInformer coreinformers.PersistentVolumeClaimInformer,
 	pvInformer coreinformers.PersistentVolumeInformer,
 	csiNodeInformer storageinformers.CSINodeInformer,
+	csiDriverInformer storageinformers.CSIDriverInformer,
 	plugins []volume.VolumePlugin,
 	prober volume.DynamicPluginProber,
 	disableReconciliationSync bool,
@@ -136,6 +139,11 @@ func NewAttachDetachController(
 		pvcQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvcs"),
 	}

+	if utilfeature.DefaultFeatureGate.Enabled(features.CSIDriverRegistry) {
+		adc.csiDriverLister = csiDriverInformer.Lister()
+		adc.csiDriversSynced = csiDriverInformer.Informer().HasSynced
+	}
+
 	if err := adc.volumePluginMgr.InitPlugins(plugins, prober, adc); err != nil {
 		return nil, fmt.Errorf("Could not initialize volume plugins for Attach/Detach Controller: %+v", err)
 	}
@@ -260,6 +268,12 @@ type attachDetachController struct {
 	csiNodeLister storagelisters.CSINodeLister
 	csiNodeSynced kcache.InformerSynced

+	// csiDriverLister is the shared CSIDriver lister used to fetch and store
+	// CSIDriver objects from the API server. It is shared with other controllers
+	// and therefore the CSIDriver objects in its store should be treated as immutable.
+	csiDriverLister storagelisters.CSIDriverLister
+	csiDriversSynced kcache.InformerSynced
+
 	// volumePluginMgr used to initialize and fetch volume plugins
 	volumePluginMgr volume.VolumePluginMgr

@@ -313,6 +327,9 @@ func (adc *attachDetachController) Run(stopCh <-chan struct{}) {
 	if adc.csiNodeSynced != nil {
 		synced = append(synced, adc.csiNodeSynced)
 	}
+	if adc.csiDriversSynced != nil {
+		synced = append(synced, adc.csiDriversSynced)
+	}

 	if !controller.WaitForCacheSync("attach detach", stopCh, synced...) {
 		return
@@ -655,6 +672,10 @@ func (adc *attachDetachController) CSINodeLister() storagelisters.CSINodeLister
 	return adc.csiNodeLister
 }

+func (adc *attachDetachController) CSIDriverLister() storagelisters.CSIDriverLister {
+	return adc.csiDriverLister
+}
+
 func (adc *attachDetachController) IsAttachDetachController() bool {
 	return true
 }
@@ -775,3 +796,7 @@ func (adc *attachDetachController) GetSubpather() subpath.Interface {
 	// Subpaths not needed in attachdetach controller
 	return nil
 }
+
+func (adc *attachDetachController) GetCSIDriverLister() storagelisters.CSIDriverLister {
+	return adc.csiDriverLister
+}
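A pattern worth noting in the controller wiring above: feature-gated informers contribute a HasSynced check only when they were actually enabled, and the sync wait then iterates whatever was collected. A self-contained imitation — InformerSynced and the wait loop here are simplified stand-ins for the client-go versions:

```go
package main

import (
	"fmt"
	"time"
)

// InformerSynced mirrors the shape of client-go's cache.InformerSynced.
type InformerSynced func() bool

// waitForCacheSync is a simplified stand-in for controller.WaitForCacheSync:
// it polls every registered check until all pass or the timeout expires.
func waitForCacheSync(timeout time.Duration, synced ...InformerSynced) bool {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		allSynced := true
		for _, s := range synced {
			if !s() {
				allSynced = false
				break
			}
		}
		if allSynced {
			return true
		}
		time.Sleep(10 * time.Millisecond)
	}
	return false
}

func main() {
	// Optional informers contribute a check only when the feature wired them up.
	var csiDriversSynced InformerSynced // stays nil when the gate is off
	synced := []InformerSynced{func() bool { return true }}
	if csiDriversSynced != nil {
		synced = append(synced, csiDriversSynced)
	}
	fmt.Println(waitForCacheSync(time.Second, synced...)) // true
}
```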
vendor/k8s.io/kubernetes/pkg/kubelet/BUILD (2 changes, generated, vendored)

@@ -132,9 +132,11 @@ go_library(
 		"//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library",
 		"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
 		"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+		"//staging/src/k8s.io/client-go/informers:go_default_library",
 		"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
 		"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
 		"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
+		"//staging/src/k8s.io/client-go/listers/storage/v1beta1:go_default_library",
 		"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
 		"//staging/src/k8s.io/client-go/tools/record:go_default_library",
 		"//staging/src/k8s.io/client-go/util/certificate:go_default_library",
2
vendor/k8s.io/kubernetes/pkg/kubelet/cm/cgroup_manager_linux.go
generated
vendored
@ -239,7 +239,7 @@ func getSupportedSubsystems() map[subsystem]bool {
    supportedSubsystems := map[subsystem]bool{
        &cgroupfs.MemoryGroup{}: true,
        &cgroupfs.CpuGroup{}:    true,
-       &cgroupfs.PidsGroup{}:   true,
+       &cgroupfs.PidsGroup{}:   false,
    }
    // not all hosts support hugetlb cgroup, and in the absent of hugetlb, we will fail silently by reporting no capacity.
    supportedSubsystems[&cgroupfs.HugetlbGroup{}] = false
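Editor's note (not part of the diff): in this map the bool marks whether a cgroup subsystem is required, so flipping pids to false makes it optional, like hugetlb, instead of a hard kubelet failure on hosts without the pids cgroup. An illustrative sketch of that required-vs-optional check, using stand-in names rather than the real kubelet API:

package main

import "fmt"

func validate(mounted map[string]bool) error {
	required := map[string]bool{
		"memory": true,
		"cpu":    true,
		"pids":   false, // optional after this change: absence is tolerated
	}
	for name, mustExist := range required {
		if !mounted[name] && mustExist {
			return fmt.Errorf("required cgroup subsystem %s is missing", name)
		}
	}
	return nil
}

func main() {
	// A host without the pids cgroup now validates cleanly.
	fmt.Println(validate(map[string]bool{"memory": true, "cpu": true}))
}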
6
vendor/k8s.io/kubernetes/pkg/kubelet/kuberuntime/kuberuntime_manager.go
generated
vendored
@ -477,11 +477,15 @@ func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *ku
    // If we need to (re-)create the pod sandbox, everything will need to be
    // killed and recreated, and init containers should be purged.
    if createPodSandbox {
-       if !shouldRestartOnFailure(pod) && attempt != 0 {
+       if !shouldRestartOnFailure(pod) && attempt != 0 && len(podStatus.ContainerStatuses) != 0 {
            // Should not restart the pod, just return.
            // we should not create a sandbox for a pod if it is already done.
            // if all containers are done and should not be started, there is no need to create a new sandbox.
            // this stops confusing logs on pods whose containers all have exit codes, but we recreate a sandbox before terminating it.
+           //
+           // If ContainerStatuses is empty, we assume that we've never
+           // successfully created any containers. In this case, we should
+           // retry creating the sandbox.
            changes.CreateSandbox = false
            return changes
        }
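Editor's note (not part of the diff): the tightened condition above only skips sandbox recreation when container statuses actually exist, i.e. containers really ran to completion. A reduced sketch of that decision, with simplified stand-in parameters instead of the kubelet's own types:

package main

import "fmt"

func shouldSkipSandbox(restartNever bool, attempt int, containerStatuses int) bool {
	// If containerStatuses == 0, no container was ever created successfully,
	// so the sandbox should be retried even for restart-never pods.
	return restartNever && attempt != 0 && containerStatuses != 0
}

func main() {
	fmt.Println(shouldSkipSandbox(true, 1, 0)) // false: retry creating the sandbox
	fmt.Println(shouldSkipSandbox(true, 1, 2)) // true: pod is already done
}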
33
vendor/k8s.io/kubernetes/pkg/kubelet/stats/cri_stats_provider_windows.go
generated
vendored
@ -19,6 +19,7 @@ limitations under the License.
package stats

import (
+   "fmt"
    "time"

    "github.com/Microsoft/hcsshim"
@ -40,18 +41,11 @@ func (p *criStatsProvider) listContainerNetworkStats() (map[string]*statsapi.Net

    stats := make(map[string]*statsapi.NetworkStats)
    for _, c := range containers {
-       container, err := hcsshim.OpenContainer(c.ID)
+       cstats, err := fetchContainerStats(c)
        if err != nil {
-           klog.V(4).Infof("Failed to open container %q with error '%v', continue to get stats for other containers", c.ID, err)
+           klog.V(4).Infof("Failed to fetch statistics for container %q with error '%v', continue to get stats for other containers", c.ID, err)
            continue
        }
-
-       cstats, err := container.Statistics()
-       if err != nil {
-           klog.V(4).Infof("Failed to get statistics for container %q with error '%v', continue to get stats for other containers", c.ID, err)
-           continue
-       }
-
        if len(cstats.Network) > 0 {
            stats[c.ID] = hcsStatsToNetworkStats(cstats.Timestamp, cstats.Network)
        }
@ -60,6 +54,27 @@ func (p *criStatsProvider) listContainerNetworkStats() (map[string]*statsapi.Net
    return stats, nil
}

+func fetchContainerStats(c hcsshim.ContainerProperties) (stats hcsshim.Statistics, err error) {
+   var (
+       container hcsshim.Container
+   )
+   container, err = hcsshim.OpenContainer(c.ID)
+   if err != nil {
+       return
+   }
+   defer func() {
+       if closeErr := container.Close(); closeErr != nil {
+           if err != nil {
+               err = fmt.Errorf("failed to close container after error %v; close error: %v", err, closeErr)
+           } else {
+               err = closeErr
+           }
+       }
+   }()
+
+   return container.Statistics()
+}
+
// hcsStatsToNetworkStats converts hcsshim.Statistics.Network to statsapi.NetworkStats
func hcsStatsToNetworkStats(timestamp time.Time, hcsStats []hcsshim.NetworkStats) *statsapi.NetworkStats {
    result := &statsapi.NetworkStats{
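Editor's note (not part of the diff): the new fetchContainerStats above uses a Go idiom worth calling out — a named error return plus a deferred Close that folds a close failure into the returned error, so the handle is never leaked. A generic sketch of the same shape using os.File instead of hcsshim:

package main

import (
	"fmt"
	"os"
)

func readHeader(path string) (header [16]byte, err error) {
	f, err := os.Open(path)
	if err != nil {
		return
	}
	// The deferred func sees the named return value `err` and can amend it.
	defer func() {
		if closeErr := f.Close(); closeErr != nil {
			if err != nil {
				err = fmt.Errorf("after error %v; close error: %v", err, closeErr)
			} else {
				err = closeErr
			}
		}
	}()
	_, err = f.Read(header[:])
	return
}

func main() {
	_, err := readHeader("/etc/hostname")
	fmt.Println(err)
}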
58
vendor/k8s.io/kubernetes/pkg/kubelet/volume_host.go
generated
vendored
@ -26,8 +26,14 @@ import (
    authenticationv1 "k8s.io/api/authentication/v1"
    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/types"
+   "k8s.io/apimachinery/pkg/util/wait"
+   utilfeature "k8s.io/apiserver/pkg/util/feature"
+   "k8s.io/client-go/informers"
    clientset "k8s.io/client-go/kubernetes"
+   storagelisters "k8s.io/client-go/listers/storage/v1beta1"
+   "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/tools/record"
+   "k8s.io/kubernetes/pkg/features"
    "k8s.io/kubernetes/pkg/kubelet/configmap"
    "k8s.io/kubernetes/pkg/kubelet/container"
    "k8s.io/kubernetes/pkg/kubelet/secret"
@ -52,12 +58,33 @@ func NewInitializedVolumePluginMgr(
    plugins []volume.VolumePlugin,
    prober volume.DynamicPluginProber) (*volume.VolumePluginMgr, error) {

+   // Initialize csiDriverLister before calling InitPlugins
+   var informerFactory informers.SharedInformerFactory
+   var csiDriverLister storagelisters.CSIDriverLister
+   var csiDriversSynced cache.InformerSynced
+   const resyncPeriod = 0
+   if utilfeature.DefaultFeatureGate.Enabled(features.CSIDriverRegistry) {
+       // Don't initialize if kubeClient is nil
+       if kubelet.kubeClient != nil {
+           informerFactory = informers.NewSharedInformerFactory(kubelet.kubeClient, resyncPeriod)
+           csiDriverInformer := informerFactory.Storage().V1beta1().CSIDrivers()
+           csiDriverLister = csiDriverInformer.Lister()
+           csiDriversSynced = csiDriverInformer.Informer().HasSynced
+
+       } else {
+           klog.Warning("kubeClient is nil. Skip initialization of CSIDriverLister")
+       }
+   }
+
    kvh := &kubeletVolumeHost{
        kubelet:          kubelet,
        volumePluginMgr:  volume.VolumePluginMgr{},
        secretManager:    secretManager,
        configMapManager: configMapManager,
        tokenManager:     tokenManager,
+       informerFactory:  informerFactory,
+       csiDriverLister:  csiDriverLister,
+       csiDriversSynced: csiDriversSynced,
    }

    if err := kvh.volumePluginMgr.InitPlugins(plugins, prober, kvh); err != nil {
@ -83,6 +110,9 @@ type kubeletVolumeHost struct {
    secretManager    secret.Manager
    tokenManager     *token.Manager
    configMapManager configmap.Manager
+   informerFactory  informers.SharedInformerFactory
+   csiDriverLister  storagelisters.CSIDriverLister
+   csiDriversSynced cache.InformerSynced
}

func (kvh *kubeletVolumeHost) SetKubeletError(err error) {
@ -121,6 +151,34 @@ func (kvh *kubeletVolumeHost) GetSubpather() subpath.Interface {
    return kvh.kubelet.subpather
}

+func (kvh *kubeletVolumeHost) GetInformerFactory() informers.SharedInformerFactory {
+   return kvh.informerFactory
+}
+
+func (kvh *kubeletVolumeHost) CSIDriverLister() storagelisters.CSIDriverLister {
+   return kvh.csiDriverLister
+}
+
+func (kvh *kubeletVolumeHost) CSIDriversSynced() cache.InformerSynced {
+   return kvh.csiDriversSynced
+}
+
+// WaitForCacheSync is a helper function that waits for cache sync for CSIDriverLister
+func (kvh *kubeletVolumeHost) WaitForCacheSync() error {
+   if kvh.csiDriversSynced == nil {
+       klog.Error("csiDriversSynced not found on KubeletVolumeHost")
+       return fmt.Errorf("csiDriversSynced not found on KubeletVolumeHost")
+   }
+
+   synced := []cache.InformerSynced{kvh.csiDriversSynced}
+   if !cache.WaitForCacheSync(wait.NeverStop, synced...) {
+       klog.Warning("failed to wait for cache sync for CSIDriverLister")
+       return fmt.Errorf("failed to wait for cache sync for CSIDriverLister")
+   }
+
+   return nil
+}
+
func (kvh *kubeletVolumeHost) NewWrapperMounter(
    volName string,
    spec volume.Spec,
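Editor's note (not part of the diff): the WaitForCacheSync helper added above guards lister reads — a lister must not be consulted before the informer's initial List has populated its store, or Get may miss objects that exist in the API server. A compact illustration of that guard as generic client-go usage, not the kubelet's exact wiring:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/tools/cache"
)

func waitForDrivers(hasSynced cache.InformerSynced) error {
	if hasSynced == nil {
		return fmt.Errorf("informer was never wired (feature gate off or no kube client)")
	}
	// wait.NeverStop mirrors the hunk above; a real caller might prefer a
	// cancellable channel so shutdown cannot be blocked here indefinitely.
	if !cache.WaitForCacheSync(wait.NeverStop, hasSynced) {
		return fmt.Errorf("failed to wait for CSIDriver cache sync")
	}
	return nil
}

func main() {}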
5
vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/volume_manager.go
generated
vendored
@ -250,6 +250,11 @@ func (vm *volumeManager) Run(sourcesReady config.SourcesReady, stopCh <-chan str

    metrics.Register(vm.actualStateOfWorld, vm.desiredStateOfWorld, vm.volumePluginMgr)

+   if vm.kubeClient != nil {
+       // start informer for CSIDriver
+       vm.volumePluginMgr.Run(stopCh)
+   }
+
    <-stopCh
    klog.Infof("Shutting down Kubelet Volume Manager")
}
1
vendor/k8s.io/kubernetes/pkg/proxy/BUILD
generated
vendored
@ -17,6 +17,7 @@ go_library(
    importpath = "k8s.io/kubernetes/pkg/proxy",
    deps = [
        "//pkg/api/v1/service:go_default_library",
+       "//pkg/proxy/metrics:go_default_library",
        "//pkg/proxy/util:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
5
vendor/k8s.io/kubernetes/pkg/proxy/endpoints.go
generated
vendored
@ -29,6 +29,7 @@ import (
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/client-go/tools/record"
+   "k8s.io/kubernetes/pkg/proxy/metrics"
    utilproxy "k8s.io/kubernetes/pkg/proxy/util"
    utilnet "k8s.io/utils/net"
)
@ -127,6 +128,7 @@ func (ect *EndpointChangeTracker) Update(previous, current *v1.Endpoints) bool {
    if endpoints == nil {
        return false
    }
+   metrics.EndpointChangesTotal.Inc()
    namespacedName := types.NamespacedName{Namespace: endpoints.Namespace, Name: endpoints.Name}

    ect.lock.Lock()
@ -154,6 +156,8 @@ func (ect *EndpointChangeTracker) Update(previous, current *v1.Endpoints) bool {
        // should be exported.
        delete(ect.lastChangeTriggerTimes, namespacedName)
    }
+
+   metrics.EndpointChangesPending.Set(float64(len(ect.items)))
    return len(ect.items) > 0
}

@ -295,6 +299,7 @@ func (endpointsMap EndpointsMap) apply(changes *EndpointChangeTracker, staleEndp
        detectStaleConnections(change.previous, change.current, staleEndpoints, staleServiceNames)
    }
    changes.items = make(map[types.NamespacedName]*endpointsChange)
+   metrics.EndpointChangesPending.Set(0)
    for _, lastChangeTriggerTime := range changes.lastChangeTriggerTimes {
        *lastChangeTriggerTimes = append(*lastChangeTriggerTimes, lastChangeTriggerTime...)
    }
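Editor's note (not part of the diff): the endpoints and service hunks in this commit follow one counter-plus-gauge pattern — bump a monotonic *_total counter on every observed change, keep a *_pending gauge at the current backlog size, and reset the gauge once the backlog is applied. A standalone sketch with prometheus/client_golang, using illustrative metric names:

package main

import "github.com/prometheus/client_golang/prometheus"

var (
	changesTotal = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "example_changes_total", Help: "Cumulative observed changes",
	})
	changesPending = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "example_changes_pending", Help: "Changes not yet applied",
	})
)

type tracker struct{ items map[string]struct{} }

func (t *tracker) observe(key string) {
	changesTotal.Inc() // every change counts, even if later coalesced
	t.items[key] = struct{}{}
	changesPending.Set(float64(len(t.items)))
}

func (t *tracker) apply() {
	t.items = map[string]struct{}{} // consume the backlog
	changesPending.Set(0)
}

func main() {
	prometheus.MustRegister(changesTotal, changesPending)
	t := &tracker{items: map[string]struct{}{}}
	t.observe("ns/name")
	t.apply()
}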
|
1
vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go
generated
vendored
1
vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go
generated
vendored
@ -1387,6 +1387,7 @@ func (proxier *Proxier) syncProxyRules() {
|
||||
if proxier.healthzServer != nil {
|
||||
proxier.healthzServer.UpdateTimestamp()
|
||||
}
|
||||
metrics.SyncProxyRulesLastTimestamp.SetToCurrentTime()
|
||||
|
||||
// Update healthchecks. The endpoints list might include services that are
|
||||
// not "OnlyLocal", but the services list will not, and the healthChecker
|
||||
|
3
vendor/k8s.io/kubernetes/pkg/proxy/ipvs/graceful_termination.go
generated
vendored
@ -18,6 +18,7 @@ package ipvs

import (
    "fmt"
+   "strings"
    "sync"
    "time"

@ -167,7 +168,7 @@ func (m *GracefulTerminationManager) deleteRsFunc(rsToDelete *listItem) (bool, e
    // For UDP traffic, no graceful termination, we immediately delete the RS
    // (existing connections will be deleted on the next packet because sysctlExpireNoDestConn=1)
    // For other protocols, don't delete until all connections have expired)
-   if rsToDelete.VirtualServer.Protocol != "udp" && rs.ActiveConn+rs.InactiveConn != 0 {
+   if strings.ToUpper(rsToDelete.VirtualServer.Protocol) != "UDP" && rs.ActiveConn+rs.InactiveConn != 0 {
        klog.Infof("Not deleting, RS %v: %v ActiveConn, %v InactiveConn", rsToDelete.String(), rs.ActiveConn, rs.InactiveConn)
        return false, nil
    }
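Editor's note (not part of the diff): the fix above is about case sensitivity — the protocol string can arrive as "UDP" while the comparison used "udp", so UDP real servers were wrongly held back for graceful termination. Normalizing case avoids the mismatch; strings.EqualFold would also work and skips the upper-cased copy. A minimal demonstration:

package main

import (
	"fmt"
	"strings"
)

func isUDP(protocol string) bool {
	// Case-insensitive comparison, equivalent in effect to the
	// strings.ToUpper check in the hunk above.
	return strings.EqualFold(protocol, "udp")
}

func main() {
	fmt.Println("UDP" == "udp") // false: the original bug
	fmt.Println(isUDP("UDP"))   // true
	fmt.Println(isUDP("udp"))   // true
}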
1
vendor/k8s.io/kubernetes/pkg/proxy/ipvs/proxier.go
generated
vendored
@ -1245,6 +1245,7 @@ func (proxier *Proxier) syncProxyRules() {
    if proxier.healthzServer != nil {
        proxier.healthzServer.UpdateTimestamp()
    }
+   metrics.SyncProxyRulesLastTimestamp.SetToCurrentTime()

    // Update healthchecks. The endpoints list might include services that are
    // not "OnlyLocal", but the services list will not, and the healthChecker
55
vendor/k8s.io/kubernetes/pkg/proxy/metrics/metrics.go
generated
vendored
@ -46,6 +46,16 @@ var (
        },
    )

+   // SyncProxyRulesLastTimestamp is the timestamp proxy rules were last
+   // successfully synced.
+   SyncProxyRulesLastTimestamp = prometheus.NewGauge(
+       prometheus.GaugeOpts{
+           Subsystem: kubeProxySubsystem,
+           Name:      "sync_proxy_rules_last_timestamp_seconds",
+           Help:      "The last time proxy rules were successfully synced",
+       },
+   )
+
    // NetworkProgrammingLatency is defined as the time it took to program the network - from the time
    // the service or pod has changed to the time the change was propagated and the proper kube-proxy
    // rules were synced. Exported for each endpoints object that were part of the rules sync.
@ -63,6 +73,46 @@ var (
            Buckets: prometheus.ExponentialBuckets(0.001, 2, 20),
        },
    )
+
+   // EndpointChangesPending is the number of pending endpoint changes that
+   // have not yet been synced to the proxy.
+   EndpointChangesPending = prometheus.NewGauge(
+       prometheus.GaugeOpts{
+           Subsystem: kubeProxySubsystem,
+           Name:      "sync_proxy_rules_endpoint_changes_pending",
+           Help:      "Pending proxy rules Endpoint changes",
+       },
+   )
+
+   // EndpointChangesTotal is the number of endpoint changes that the proxy
+   // has seen.
+   EndpointChangesTotal = prometheus.NewCounter(
+       prometheus.CounterOpts{
+           Subsystem: kubeProxySubsystem,
+           Name:      "sync_proxy_rules_endpoint_changes_total",
+           Help:      "Cumulative proxy rules Endpoint changes",
+       },
+   )
+
+   // ServiceChangesPending is the number of pending service changes that
+   // have not yet been synced to the proxy.
+   ServiceChangesPending = prometheus.NewGauge(
+       prometheus.GaugeOpts{
+           Subsystem: kubeProxySubsystem,
+           Name:      "sync_proxy_rules_service_changes_pending",
+           Help:      "Pending proxy rules Service changes",
+       },
+   )
+
+   // ServiceChangesTotal is the number of service changes that the proxy has
+   // seen.
+   ServiceChangesTotal = prometheus.NewCounter(
+       prometheus.CounterOpts{
+           Subsystem: kubeProxySubsystem,
+           Name:      "sync_proxy_rules_service_changes_total",
+           Help:      "Cumulative proxy rules Service changes",
+       },
+   )
)

var registerMetricsOnce sync.Once
@ -72,7 +122,12 @@ func RegisterMetrics() {
    registerMetricsOnce.Do(func() {
        prometheus.MustRegister(SyncProxyRulesLatency)
        prometheus.MustRegister(DeprecatedSyncProxyRulesLatency)
+       prometheus.MustRegister(SyncProxyRulesLastTimestamp)
        prometheus.MustRegister(NetworkProgrammingLatency)
+       prometheus.MustRegister(EndpointChangesPending)
+       prometheus.MustRegister(EndpointChangesTotal)
+       prometheus.MustRegister(ServiceChangesPending)
+       prometheus.MustRegister(ServiceChangesTotal)
    })
}
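Editor's note (not part of the diff): the new *_last_timestamp_seconds gauge enables a staleness check — stamp the gauge on every successful sync, then alert when "now minus gauge" grows. A hedged sketch of the producer side and the sync.Once registration guard, with illustrative names (lastSync, syncRules):

package main

import (
	"sync"

	"github.com/prometheus/client_golang/prometheus"
)

var lastSync = prometheus.NewGauge(prometheus.GaugeOpts{
	Subsystem: "example_proxy",
	Name:      "sync_rules_last_timestamp_seconds",
	Help:      "The last time rules were successfully synced",
})

var registerOnce sync.Once

// register is safe to call from several startup paths; only the first call
// actually registers, mirroring RegisterMetrics in the hunk above.
func register() {
	registerOnce.Do(func() { prometheus.MustRegister(lastSync) })
}

func syncRules() error {
	// ... apply rules; on success, stamp the gauge with the current time.
	lastSync.SetToCurrentTime()
	return nil
}

func main() {
	register()
	_ = syncRules()
	// A PromQL alert could then use:
	//   time() - example_proxy_sync_rules_last_timestamp_seconds > 60
}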
4
vendor/k8s.io/kubernetes/pkg/proxy/service.go
generated
vendored
@ -30,6 +30,7 @@ import (
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/client-go/tools/record"
    apiservice "k8s.io/kubernetes/pkg/api/v1/service"
+   "k8s.io/kubernetes/pkg/proxy/metrics"
    utilproxy "k8s.io/kubernetes/pkg/proxy/util"
    utilnet "k8s.io/utils/net"
)
@ -198,6 +199,7 @@ func (sct *ServiceChangeTracker) Update(previous, current *v1.Service) bool {
    if svc == nil {
        return false
    }
+   metrics.ServiceChangesTotal.Inc()
    namespacedName := types.NamespacedName{Namespace: svc.Namespace, Name: svc.Name}

    sct.lock.Lock()
@ -214,6 +216,7 @@ func (sct *ServiceChangeTracker) Update(previous, current *v1.Service) bool {
    if reflect.DeepEqual(change.previous, change.current) {
        delete(sct.items, namespacedName)
    }
+   metrics.ServiceChangesPending.Set(float64(len(sct.items)))
    return len(sct.items) > 0
}

@ -296,6 +299,7 @@ func (serviceMap *ServiceMap) apply(changes *ServiceChangeTracker, UDPStaleClust
    }
    // clear changes after applying them to ServiceMap.
    changes.items = make(map[types.NamespacedName]*serviceChange)
+   metrics.ServiceChangesPending.Set(0)
    return
}
11
vendor/k8s.io/kubernetes/pkg/proxy/winkernel/metrics.go
generated
vendored
@ -43,6 +43,16 @@ var (
            Buckets: prometheus.ExponentialBuckets(1000, 2, 15),
        },
    )
+
+   // SyncProxyRulesLastTimestamp is the timestamp proxy rules were last
+   // successfully synced.
+   SyncProxyRulesLastTimestamp = prometheus.NewGauge(
+       prometheus.GaugeOpts{
+           Subsystem: kubeProxySubsystem,
+           Name:      "sync_proxy_rules_last_timestamp_seconds",
+           Help:      "The last time proxy rules were successfully synced",
+       },
+   )
)

var registerMetricsOnce sync.Once
@ -51,6 +61,7 @@ func RegisterMetrics() {
    registerMetricsOnce.Do(func() {
        prometheus.MustRegister(SyncProxyRulesLatency)
        prometheus.MustRegister(DeprecatedSyncProxyRulesLatency)
+       prometheus.MustRegister(SyncProxyRulesLastTimestamp)
    })
}
1
vendor/k8s.io/kubernetes/pkg/proxy/winkernel/proxier.go
generated
vendored
@ -1197,6 +1197,7 @@ func (proxier *Proxier) syncProxyRules() {
    if proxier.healthzServer != nil {
        proxier.healthzServer.UpdateTimestamp()
    }
+   SyncProxyRulesLastTimestamp.SetToCurrentTime()

    // Update healthchecks. The endpoints list might include services that are
    // not "OnlyLocal", but the services list will not, and the healthChecker
2
vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go
generated
vendored
@ -234,7 +234,7 @@ func (mounter *Mounter) IsLikelyNotMountPoint(file string) (bool, error) {
    if err != nil {
        return true, err
    }
-   rootStat, err := os.Lstat(filepath.Dir(strings.TrimSuffix(file, "/")))
+   rootStat, err := os.Stat(filepath.Dir(strings.TrimSuffix(file, "/")))
    if err != nil {
        return true, err
    }
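Editor's note (not part of the diff): the one-word change above matters because os.Lstat stats a symlink itself while os.Stat follows it. IsLikelyNotMountPoint compares the device IDs of a path and its parent; when the parent is reached through a symlink, Lstat reports the link rather than the target, which can yield a wrong answer. A quick way to see the difference (assumes /bin is a symlink to usr/bin, as on many current Linux distributions — substitute any symlink otherwise):

package main

import (
	"fmt"
	"os"
)

func main() {
	for _, stat := range []func(string) (os.FileInfo, error){os.Lstat, os.Stat} {
		fi, err := stat("/bin")
		if err != nil {
			fmt.Println(err)
			continue
		}
		// Lstat reports ModeSymlink for the link; Stat reports the directory.
		fmt.Printf("mode=%v symlink=%v\n", fi.Mode(), fi.Mode()&os.ModeSymlink != 0)
	}
}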
6
vendor/k8s.io/kubernetes/pkg/version/base.go
generated
vendored
@ -3,8 +3,8 @@ package version
var (
    gitMajor = "1"
    gitMinor = "14"
-   gitVersion   = "v1.14.3-k3s.2"
-   gitCommit    = "6174f1fed28fd19300038f6578bf48e3920fa7ba"
+   gitVersion   = "v1.14.4-k3s.1"
+   gitCommit    = "ac992f3ef6c6d71e14366a1f65318e10eb3f4232"
    gitTreeState = "clean"
-   buildDate = "2019-06-21T08:17+00:00Z"
+   buildDate = "2019-07-14T07:04+00:00Z"
)
4
vendor/k8s.io/kubernetes/pkg/volume/BUILD
generated
vendored
@ -18,6 +18,7 @@ go_library(
    importpath = "k8s.io/kubernetes/pkg/volume",
    visibility = ["//visibility:public"],
    deps = [
+       "//pkg/features:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/volume/util/fs:go_default_library",
        "//pkg/volume/util/recyclerclient:go_default_library",
@ -29,8 +30,11 @@ go_library(
        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library",
+       "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+       "//staging/src/k8s.io/client-go/informers:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
+       "//staging/src/k8s.io/client-go/listers/storage/v1beta1:go_default_library",
        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
        "//staging/src/k8s.io/cloud-provider:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
3
vendor/k8s.io/kubernetes/pkg/volume/csi/BUILD
generated
vendored
@ -31,8 +31,6 @@ go_library(
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
-       "//staging/src/k8s.io/client-go/informers:go_default_library",
-       "//staging/src/k8s.io/client-go/informers/storage/v1beta1:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/listers/storage/v1beta1:go_default_library",
        "//staging/src/k8s.io/csi-translation-lib/plugins:go_default_library",
@ -73,6 +71,7 @@ go_test(
        "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
+       "//staging/src/k8s.io/client-go/informers:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
        "//staging/src/k8s.io/client-go/testing:go_default_library",
9
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_mounter.go
generated
vendored
@ -19,7 +19,6 @@ package csi
import (
    "context"
    "crypto/sha256"
-   "errors"
    "fmt"
    "os"
    "path"
@ -276,8 +275,14 @@ func (c *csiMountMgr) podAttributes() (map[string]string, error) {
    if !utilfeature.DefaultFeatureGate.Enabled(features.CSIDriverRegistry) {
        return nil, nil
    }

+   kletHost, ok := c.plugin.host.(volume.KubeletVolumeHost)
+   if ok {
+       kletHost.WaitForCacheSync()
+   }
+
    if c.plugin.csiDriverLister == nil {
-       return nil, errors.New("CSIDriver lister does not exist")
+       return nil, fmt.Errorf("CSIDriverLister not found")
    }

    csiDriver, err := c.plugin.csiDriverLister.Get(string(c.driverName))
38
vendor/k8s.io/kubernetes/pkg/volume/csi/csi_plugin.go
generated
vendored
@ -35,12 +35,9 @@ import (
    meta "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    utilversion "k8s.io/apimachinery/pkg/util/version"
-   "k8s.io/apimachinery/pkg/util/wait"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
-   csiapiinformer "k8s.io/client-go/informers"
-   csiinformer "k8s.io/client-go/informers/storage/v1beta1"
    clientset "k8s.io/client-go/kubernetes"
-   csilister "k8s.io/client-go/listers/storage/v1beta1"
+   storagelisters "k8s.io/client-go/listers/storage/v1beta1"
    "k8s.io/kubernetes/pkg/features"
    "k8s.io/kubernetes/pkg/volume"
    "k8s.io/kubernetes/pkg/volume/csi/nodeinfomanager"
@ -73,10 +70,9 @@ var (
var deprecatedSocketDirVersions = []string{"0.1.0", "0.2.0", "0.3.0", "0.4.0"}

type csiPlugin struct {
-   host              volume.VolumeHost
-   blockEnabled      bool
-   csiDriverLister   csilister.CSIDriverLister
-   csiDriverInformer csiinformer.CSIDriverInformer
+   host            volume.VolumeHost
+   blockEnabled    bool
+   csiDriverLister storagelisters.CSIDriverLister
}

//TODO (vladimirvivien) add this type to storage api
@ -244,11 +240,21 @@ func (p *csiPlugin) Init(host volume.VolumeHost) error {
    if csiClient == nil {
        klog.Warning(log("kubeclient not set, assuming standalone kubelet"))
    } else {
-       // Start informer for CSIDrivers.
-       factory := csiapiinformer.NewSharedInformerFactory(csiClient, csiResyncPeriod)
-       p.csiDriverInformer = factory.Storage().V1beta1().CSIDrivers()
-       p.csiDriverLister = p.csiDriverInformer.Lister()
-       go factory.Start(wait.NeverStop)
+       // set CSIDriverLister
+       adcHost, ok := host.(volume.AttachDetachVolumeHost)
+       if ok {
+           p.csiDriverLister = adcHost.CSIDriverLister()
+           if p.csiDriverLister == nil {
+               klog.Error(log("CSIDriverLister not found on AttachDetachVolumeHost"))
+           }
+       }
+       kletHost, ok := host.(volume.KubeletVolumeHost)
+       if ok {
+           p.csiDriverLister = kletHost.CSIDriverLister()
+           if p.csiDriverLister == nil {
+               klog.Error(log("CSIDriverLister not found on KubeletVolumeHost"))
+           }
+       }
    }
}

@ -682,6 +688,12 @@ func (p *csiPlugin) skipAttach(driver string) (bool, error) {
    if !utilfeature.DefaultFeatureGate.Enabled(features.CSIDriverRegistry) {
        return false, nil
    }

+   kletHost, ok := p.host.(volume.KubeletVolumeHost)
+   if ok {
+       kletHost.WaitForCacheSync()
+   }
+
    if p.csiDriverLister == nil {
        return false, errors.New("CSIDriver lister does not exist")
    }
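Editor's note (not part of the diff): the Init change above replaces a plugin-owned informer with a lister injected by the host, discovered via Go interface type assertions. A distilled sketch of that capability-discovery pattern — all types here are illustrative stand-ins, not the volume package's real interfaces:

package main

import "fmt"

type Host interface{ Name() string }

// Optional capability a host may or may not implement.
type ListerProvider interface{ DriverNames() []string }

type kubeletHost struct{}

func (kubeletHost) Name() string          { return "kubelet" }
func (kubeletHost) DriverNames() []string { return []string{"ebs.csi.aws.com"} }

func initPlugin(h Host) {
	// The comma-ok assertion is the Go idiom for "does this host offer the
	// capability?", avoiding a hard dependency on a concrete host type.
	if lp, ok := h.(ListerProvider); ok {
		fmt.Println("drivers from host:", lp.DriverNames())
	} else {
		fmt.Println("host", h.Name(), "provides no lister; running degraded")
	}
}

func main() { initPlugin(kubeletHost{}) }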
16
vendor/k8s.io/kubernetes/pkg/volume/flexvolume/unmounter.go
generated
vendored
@ -42,15 +42,15 @@ func (f *flexVolumeUnmounter) TearDown() error {
}

func (f *flexVolumeUnmounter) TearDownAt(dir string) error {

    pathExists, pathErr := mount.PathExists(dir)
-   if !pathExists {
-       klog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir)
-       return nil
-   }
-
-   if pathErr != nil && !mount.IsCorruptedMnt(pathErr) {
-       return fmt.Errorf("Error checking path: %v", pathErr)
+   if pathErr != nil {
+       // only log warning here since plugins should anyways have to deal with errors
+       klog.Warningf("Error checking path: %v", pathErr)
+   } else {
+       if !pathExists {
+           klog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir)
+           return nil
+       }
    }

    call := f.plugin.NewDriverCall(unmountCmd)
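Editor's note (not part of the diff): the reordering above matters for corrupted mounts — a stale mount point can make the existence check itself fail, returning pathExists=false along with a non-nil error. Checking !pathExists first silently skipped the unmount; checking the error first lets the flex driver still attempt cleanup. A hypothetical sketch of the control flow (tearDown and its parameters are stand-ins):

package main

import "fmt"

func tearDown(dir string, pathExists bool, pathErr error) {
	if pathErr != nil {
		// Log and fall through: the driver may still be able to clean up
		// a corrupted mount even though stat-ing the path failed.
		fmt.Println("warning: error checking path:", pathErr)
	} else if !pathExists {
		fmt.Println("unmount skipped, path does not exist:", dir)
		return
	}
	fmt.Println("invoking driver unmount for", dir)
}

func main() {
	// With the old ordering this case returned early and never unmounted.
	tearDown("/mnt/vol", false, fmt.Errorf("stale NFS file handle"))
}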
27
vendor/k8s.io/kubernetes/pkg/volume/plugins.go
generated
vendored
@ -25,10 +25,14 @@ import (
    "k8s.io/apimachinery/pkg/types"
    utilerrors "k8s.io/apimachinery/pkg/util/errors"
    "k8s.io/apimachinery/pkg/util/validation"
+   utilfeature "k8s.io/apiserver/pkg/util/feature"
+   "k8s.io/client-go/informers"
    clientset "k8s.io/client-go/kubernetes"
+   storagelisters "k8s.io/client-go/listers/storage/v1beta1"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/tools/record"
    "k8s.io/klog"
+   "k8s.io/kubernetes/pkg/features"
    "k8s.io/kubernetes/pkg/util/mount"
    "k8s.io/kubernetes/pkg/volume/util/recyclerclient"
    "k8s.io/kubernetes/pkg/volume/util/subpath"
@ -317,6 +321,15 @@ type KubeletVolumeHost interface {
    // SetKubeletError lets plugins set an error on the Kubelet runtime status
    // that will cause the Kubelet to post NotReady status with the error message provided
    SetKubeletError(err error)

+   // GetInformerFactory returns the informer factory for CSIDriverLister
+   GetInformerFactory() informers.SharedInformerFactory
+   // CSIDriverLister returns the informer lister for the CSIDriver API Object
+   CSIDriverLister() storagelisters.CSIDriverLister
+   // CSIDriverSynced returns the informer synced for the CSIDriver API Object
+   CSIDriversSynced() cache.InformerSynced
+   // WaitForCacheSync is a helper function that waits for cache sync for CSIDriverLister
+   WaitForCacheSync() error
}

// AttachDetachVolumeHost is a AttachDetach Controller specific interface that plugins can use
@ -325,6 +338,9 @@ type AttachDetachVolumeHost interface {
    // CSINodeLister returns the informer lister for the CSINode API Object
    CSINodeLister() storagelisters.CSINodeLister

+   // CSIDriverLister returns the informer lister for the CSIDriver API Object
+   CSIDriverLister() storagelisters.CSIDriverLister
+
    // IsAttachDetachController is an interface marker to strictly tie AttachDetachVolumeHost
    // to the attachDetachController
    IsAttachDetachController() bool
@ -1003,6 +1019,17 @@ func (pm *VolumePluginMgr) FindNodeExpandablePluginByName(name string) (NodeExpa
    return nil, nil
}

+func (pm *VolumePluginMgr) Run(stopCh <-chan struct{}) {
+   kletHost, ok := pm.Host.(KubeletVolumeHost)
+   if ok {
+       // start informer for CSIDriver
+       if utilfeature.DefaultFeatureGate.Enabled(features.CSIDriverRegistry) {
+           informerFactory := kletHost.GetInformerFactory()
+           go informerFactory.Start(stopCh)
+       }
+   }
+}
+
// NewPersistentVolumeRecyclerPodTemplate creates a template for a recycler
// pod. By default, a recycler pod simply runs "rm -rf" on a volume and tests
// for emptiness. Most attributes of the template will be correct for most
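Editor's note (not part of the diff): for context on what the new Run starts — a SharedInformerFactory lazily creates informers the first time a Lister() is requested, and Start launches watch goroutines for every informer created so far. A compact, self-contained lifecycle sketch using a fake clientset (illustrative generic client-go usage, not the kubelet's wiring):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/cache"
)

func main() {
	client := fake.NewSimpleClientset()
	factory := informers.NewSharedInformerFactory(client, 0 /* no resync */)

	// Requesting the lister registers the CSIDriver informer with the factory.
	driverInformer := factory.Storage().V1beta1().CSIDrivers()
	lister := driverInformer.Lister()

	stopCh := make(chan struct{})
	defer close(stopCh)
	factory.Start(stopCh) // non-blocking; spawns the watch goroutines

	if !cache.WaitForCacheSync(stopCh, driverInformer.Informer().HasSynced) {
		panic("cache never synced")
	}
	drivers, _ := lister.List(labels.Everything())
	fmt.Println("drivers:", len(drivers))
}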
2
vendor/k8s.io/kubernetes/pkg/volume/util/subpath/subpath_linux.go
generated
vendored
@ -398,7 +398,7 @@ func doSafeMakeDir(pathname string, base string, perm os.FileMode) error {
        return fmt.Errorf("cannot create directory %s: %s", currentPath, err)
    }
    // Dive into the created directory
-   childFD, err := syscall.Openat(parentFD, dir, nofollowFlags, 0)
+   childFD, err = syscall.Openat(parentFD, dir, nofollowFlags, 0)
    if err != nil {
        return fmt.Errorf("cannot open %s: %s", currentPath, err)
    }
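Editor's note (not part of the diff): the one-character fix above (`:=` to `=`) removes a classic Go shadowing bug — inside an inner block, `childFD, err := ...` declares a new childFD local to that block, so the outer childFD (the one a deferred cleanup closes) never receives the freshly opened descriptor, leaking a file descriptor per iteration. A minimal reproduction of the language behavior:

package main

import "fmt"

func main() {
	x := 1
	if true {
		x, err := 2, error(nil) // := declares a NEW x, shadowing the outer one
		_ = err
		_ = x
	}
	fmt.Println(x) // still 1: the inner assignment never escaped the block

	y := 1
	var err error
	if true {
		y, err = 2, nil // = updates the outer y
	}
	_ = err
	fmt.Println(y) // 2
}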