Mirror of https://github.com/k3s-io/k3s.git

Commit 57f0959b93 (parent eda8319841): Update vendor

vendor/gopkg.in/yaml.v2/decode.go (generated, vendored) — 38 lines changed

@@ -229,6 +229,10 @@ type decoder struct {
  mapType reflect.Type
  terrors []string
  strict bool
+
+ decodeCount int
+ aliasCount int
+ aliasDepth int
  }

  var (

@@ -314,7 +318,39 @@ func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unm
  return out, false, false
  }

+ const (
+ // 400,000 decode operations is ~500kb of dense object declarations, or ~5kb of dense object declarations with 10000% alias expansion
+ alias_ratio_range_low = 400000
+ // 4,000,000 decode operations is ~5MB of dense object declarations, or ~4.5MB of dense object declarations with 10% alias expansion
+ alias_ratio_range_high = 4000000
+ // alias_ratio_range is the range over which we scale allowed alias ratios
+ alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low)
+ )
+
+ func allowedAliasRatio(decodeCount int) float64 {
+ switch {
+ case decodeCount <= alias_ratio_range_low:
+ // allow 99% to come from alias expansion for small-to-medium documents
+ return 0.99
+ case decodeCount >= alias_ratio_range_high:
+ // allow 10% to come from alias expansion for very large documents
+ return 0.10
+ default:
+ // scale smoothly from 99% down to 10% over the range.
+ // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range.
+ // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps).
+ return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range)
+ }
+ }
+
  func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
+ d.decodeCount++
+ if d.aliasDepth > 0 {
+ d.aliasCount++
+ }
+ if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) {
+ failf("document contains excessive aliasing")
+ }
  switch n.kind {
  case documentNode:
  return d.document(n, out)

@@ -353,7 +389,9 @@ func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
  failf("anchor '%s' value contains itself", n.value)
  }
  d.aliases[n] = true
+ d.aliasDepth++
  good = d.unmarshal(n.alias, out)
+ d.aliasDepth--
  delete(d.aliases, n)
  return good
  }
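The new decodeCount/aliasCount/aliasDepth fields and the allowedAliasRatio check above target "billion laughs"-style documents, where almost all decode work comes from alias expansion. The following standalone sketch (not part of this commit) shows the intended effect; it assumes gopkg.in/yaml.v2 at v2.2.4 or newer, and the exact error text may differ.

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	// Each anchor expands the previous one ten times, so the fully expanded
	// structure is enormous even though the input is a few hundred bytes.
	doc := `
a: &a ["x","x","x","x","x","x","x","x","x","x"]
b: &b [*a,*a,*a,*a,*a,*a,*a,*a,*a,*a]
c: &c [*b,*b,*b,*b,*b,*b,*b,*b,*b,*b]
d: &d [*c,*c,*c,*c,*c,*c,*c,*c,*c,*c]
e: &e [*d,*d,*d,*d,*d,*d,*d,*d,*d,*d]
f: &f [*e,*e,*e,*e,*e,*e,*e,*e,*e,*e]
g: &g [*f,*f,*f,*f,*f,*f,*f,*f,*f,*f]
`
	var out map[string]interface{}
	err := yaml.Unmarshal([]byte(doc), &out)
	// With the alias-ratio guard in place this fails quickly with an
	// "excessive aliasing" error instead of allocating the full expansion.
	fmt.Println(err)
}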

vendor/gopkg.in/yaml.v2/resolve.go (generated, vendored) — 2 lines changed

@@ -81,7 +81,7 @@ func resolvableTag(tag string) bool {
  return false
  }

- var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`)
+ var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)

  func resolve(tag string, in string) (rtag string, out interface{}) {
  if !resolvableTag(tag) {
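The regex change above relaxes YAML float resolution. A small standalone comparison of the two patterns (not part of this commit), copied verbatim from the diff and compiled with the standard regexp package; it shows that forms like "1e3" and "-2." now resolve as floats while a bare "." still does not.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Patterns copied verbatim from the diff above.
	oldFloat := regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`)
	newFloat := regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)

	for _, s := range []string{"1.5", "1e3", "-2.", ".5", "."} {
		fmt.Printf("%-4s old=%-5v new=%v\n", s, oldFloat.MatchString(s), newFloat.MatchString(s))
	}
}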

vendor/gopkg.in/yaml.v2/scannerc.go (generated, vendored) — 16 lines changed

@@ -906,6 +906,9 @@ func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
  return true
  }

+ // max_flow_level limits the flow_level
+ const max_flow_level = 10000
+
  // Increase the flow level and resize the simple key list if needed.
  func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
  // Reset the simple key on the next level.

@@ -913,6 +916,11 @@ func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {

  // Increase the flow level.
  parser.flow_level++
+ if parser.flow_level > max_flow_level {
+ return yaml_parser_set_scanner_error(parser,
+ "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+ fmt.Sprintf("exceeded max depth of %d", max_flow_level))
+ }
  return true
  }

@@ -925,6 +933,9 @@ func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
  return true
  }

+ // max_indents limits the indents stack size
+ const max_indents = 10000
+
  // Push the current indentation level to the stack and set the new level
  // the current column is greater than the indentation level. In this case,
  // append or insert the specified token into the token queue.

@@ -939,6 +950,11 @@ func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml
  // indentation level.
  parser.indents = append(parser.indents, parser.indent)
  parser.indent = column
+ if len(parser.indents) > max_indents {
+ return yaml_parser_set_scanner_error(parser,
+ "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+ fmt.Sprintf("exceeded max depth of %d", max_indents))
+ }

  // Create a token and insert it into the queue.
  token := yaml_token_t{
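The new max_flow_level and max_indents constants bound how deeply nested a document the scanner will accept. A standalone sketch (not part of this commit) of the kind of input they reject, assuming gopkg.in/yaml.v2 v2.2.4 or newer; the exact error wording may differ.

package main

import (
	"fmt"
	"strings"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	// 20,000 nested flow sequences: far past the new max_flow_level of 10,000.
	depth := 20000
	doc := strings.Repeat("[", depth) + strings.Repeat("]", depth)

	var out interface{}
	err := yaml.Unmarshal([]byte(doc), &out)
	fmt.Println(err) // expected: a scanner error mentioning "exceeded max depth of 10000"
}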

vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/apiserver.go (generated, vendored) — 1 line changed

@@ -201,6 +201,7 @@ func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget)
  c.GenericConfig.RequestTimeout,
  time.Duration(c.GenericConfig.MinRequestTimeout)*time.Second,
  apiGroupInfo.StaticOpenAPISpec,
+ c.GenericConfig.MaxRequestBodyBytes,
  )
  if err != nil {
  return nil, err

vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go (generated, vendored) — 10 lines changed

@@ -125,6 +125,10 @@ type crdHandler struct {
  // purpose of managing fields, it is how CR handlers get the structure
  // of TypeMeta and ObjectMeta
  staticOpenAPISpec *spec.Swagger
+
+ // The limit on the request size that would be accepted and decoded in a write request
+ // 0 means no limit.
+ maxRequestBodyBytes int64
  }

  // crdInfo stores enough information to serve the storage for the custom resource

@@ -169,7 +173,8 @@ func NewCustomResourceDefinitionHandler(
  authorizer authorizer.Authorizer,
  requestTimeout time.Duration,
  minRequestTimeout time.Duration,
- staticOpenAPISpec *spec.Swagger) (*crdHandler, error) {
+ staticOpenAPISpec *spec.Swagger,
+ maxRequestBodyBytes int64) (*crdHandler, error) {
  ret := &crdHandler{
  versionDiscoveryHandler: versionDiscoveryHandler,
  groupDiscoveryHandler: groupDiscoveryHandler,

@@ -185,6 +190,7 @@ func NewCustomResourceDefinitionHandler(
  requestTimeout: requestTimeout,
  minRequestTimeout: minRequestTimeout,
  staticOpenAPISpec: staticOpenAPISpec,
+ maxRequestBodyBytes: maxRequestBodyBytes,
  }
  crdInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
  AddFunc: ret.createCustomResourceDefinition,

@@ -812,6 +818,8 @@ func (r *crdHandler) getOrCreateServingInfoFor(uid types.UID, name string) (*crd
  TableConvertor: storages[v.Name].CustomResource,

  Authorizer: r.authorizer,
+
+ MaxRequestBodyBytes: r.maxRequestBodyBytes,
  }
  if utilfeature.DefaultFeatureGate.Enabled(features.ServerSideApply) {
  reqScope := *requestScopes[v.Name]

vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/BUILD (generated, vendored) — 1 line changed

@@ -48,6 +48,7 @@ go_test(
  srcs = [
  "convert_test.go",
  "goopenapi_test.go",
+ "unfold_test.go",
  "validation_test.go",
  ],
  embed = [":go_default_library"],

vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/structural.go (generated, vendored) — 2 lines changed

@@ -30,7 +30,7 @@ type Structural struct {
  Generic
  Extensions

- *ValueValidation
+ ValueValidation *ValueValidation
  }

  // +k8s:deepcopy-gen=true

vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/unfold.go (generated, vendored) — 11 lines changed

@@ -35,13 +35,16 @@ func (s *Structural) Unfold() *Structural {
  return false
  }

- if s.AnyOf == nil {
- s.AnyOf = []NestedValueValidation{
+ if s.ValueValidation == nil {
+ s.ValueValidation = &ValueValidation{}
+ }
+ if s.ValueValidation.AnyOf == nil {
+ s.ValueValidation.AnyOf = []NestedValueValidation{
  {ForbiddenGenerics: Generic{Type: "integer"}},
  {ForbiddenGenerics: Generic{Type: "string"}},
  }
  } else {
- s.AllOf = append([]NestedValueValidation{
+ s.ValueValidation.AllOf = append([]NestedValueValidation{
  {
  ValueValidation: ValueValidation{
  AnyOf: []NestedValueValidation{

@@ -50,7 +53,7 @@ func (s *Structural) Unfold() *Structural {
  },
  },
  },
- }, s.AllOf...)
+ }, s.ValueValidation.AllOf...)
  }

  return true

vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/BUILD (generated, vendored) — 2 lines changed

@@ -9,6 +9,7 @@ load(
  go_test(
  name = "go_default_test",
  srcs = [
+ "json_limit_test.go",
  "json_test.go",
  "meta_test.go",
  ],

@@ -17,6 +18,7 @@ go_test(
  "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
  "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
  "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
+ "//staging/src/k8s.io/apimachinery/pkg/util/json:go_default_library",
  ],
  )

vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go (generated, vendored) — 20 lines changed

@@ -122,7 +122,27 @@ func (customNumberDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
  }
  iter.ReportError("DecodeNumber", err.Error())
  default:
+ // init depth, if needed
+ if iter.Attachment == nil {
+ iter.Attachment = int(1)
+ }
+
+ // remember current depth
+ originalAttachment := iter.Attachment
+
+ // increment depth before descending
+ if i, ok := iter.Attachment.(int); ok {
+ iter.Attachment = i + 1
+ if i > 10000 {
+ iter.ReportError("parse", "exceeded max depth")
+ return
+ }
+ }
+
  *(*interface{})(ptr) = iter.Read()
+
+ // restore current depth
+ iter.Attachment = originalAttachment
  }
  }

vendor/k8s.io/apimachinery/pkg/runtime/serializer/yaml/BUILD (generated, vendored) — 16 lines changed

@@ -1,9 +1,6 @@
  package(default_visibility = ["//visibility:public"])

- load(
- "@io_bazel_rules_go//go:def.bzl",
- "go_library",
- )
+ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

  go_library(
  name = "go_default_library",

@@ -29,3 +26,14 @@ filegroup(
  srcs = [":package-srcs"],
  tags = ["automanaged"],
  )
+
+ go_test(
+ name = "go_default_test",
+ srcs = ["yaml_test.go"],
+ data = glob(["testdata/**"]),
+ embed = [":go_default_library"],
+ deps = [
+ "//staging/src/k8s.io/apimachinery/pkg/util/yaml:go_default_library",
+ "//vendor/sigs.k8s.io/yaml:go_default_library",
+ ],
+ )

vendor/k8s.io/apimachinery/pkg/util/json/json.go (generated, vendored) — 28 lines changed

@@ -19,6 +19,7 @@ package json
  import (
  "bytes"
  "encoding/json"
+ "fmt"
  "io"
  )

@@ -34,6 +35,9 @@ func Marshal(v interface{}) ([]byte, error) {
  return json.Marshal(v)
  }

+ // limit recursive depth to prevent stack overflow errors
+ const maxDepth = 10000
+
  // Unmarshal unmarshals the given data
  // If v is a *map[string]interface{}, numbers are converted to int64 or float64
  func Unmarshal(data []byte, v interface{}) error {

@@ -48,7 +52,7 @@ func Unmarshal(data []byte, v interface{}) error {
  return err
  }
  // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
- return convertMapNumbers(*v)
+ return convertMapNumbers(*v, 0)

  case *[]interface{}:
  // Build a decoder from the given data

@@ -60,7 +64,7 @@ func Unmarshal(data []byte, v interface{}) error {
  return err
  }
  // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
- return convertSliceNumbers(*v)
+ return convertSliceNumbers(*v, 0)

  default:
  return json.Unmarshal(data, v)

@@ -69,16 +73,20 @@ func Unmarshal(data []byte, v interface{}) error {

  // convertMapNumbers traverses the map, converting any json.Number values to int64 or float64.
  // values which are map[string]interface{} or []interface{} are recursively visited
- func convertMapNumbers(m map[string]interface{}) error {
+ func convertMapNumbers(m map[string]interface{}, depth int) error {
+ if depth > maxDepth {
+ return fmt.Errorf("exceeded max depth of %d", maxDepth)
+ }
+
  var err error
  for k, v := range m {
  switch v := v.(type) {
  case json.Number:
  m[k], err = convertNumber(v)
  case map[string]interface{}:
- err = convertMapNumbers(v)
+ err = convertMapNumbers(v, depth+1)
  case []interface{}:
- err = convertSliceNumbers(v)
+ err = convertSliceNumbers(v, depth+1)
  }
  if err != nil {
  return err

@@ -89,16 +97,20 @@ func convertMapNumbers(m map[string]interface{}) error {

  // convertSliceNumbers traverses the slice, converting any json.Number values to int64 or float64.
  // values which are map[string]interface{} or []interface{} are recursively visited
- func convertSliceNumbers(s []interface{}) error {
+ func convertSliceNumbers(s []interface{}, depth int) error {
+ if depth > maxDepth {
+ return fmt.Errorf("exceeded max depth of %d", maxDepth)
+ }
+
  var err error
  for i, v := range s {
  switch v := v.(type) {
  case json.Number:
  s[i], err = convertNumber(v)
  case map[string]interface{}:
- err = convertMapNumbers(v)
+ err = convertMapNumbers(v, depth+1)
  case []interface{}:
- err = convertSliceNumbers(v)
+ err = convertSliceNumbers(v, depth+1)
  }
  if err != nil {
  return err
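With the depth parameter added above, the post-decode number conversion refuses pathologically nested documents instead of recursing without bound. A standalone sketch (not part of this commit) of how that surfaces to a caller, assuming a k8s.io/apimachinery version that contains this change:

package main

import (
	"fmt"
	"strings"

	utiljson "k8s.io/apimachinery/pkg/util/json"
)

func main() {
	// 20,000 levels of nesting: well past the maxDepth of 10,000 added above.
	depth := 20000
	data := strings.Repeat(`{"a":`, depth) + "1" + strings.Repeat("}", depth)

	out := map[string]interface{}{}
	// The *map[string]interface{} branch of Unmarshal post-processes numbers
	// via convertMapNumbers, which now returns an error once the depth limit
	// is exceeded.
	err := utiljson.Unmarshal([]byte(data), &out)
	fmt.Println(err)
}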

vendor/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/fieldmanager.go (generated, vendored) — 4 lines changed

@@ -190,11 +190,11 @@ func (f *FieldManager) Apply(liveObj runtime.Object, patch []byte, fieldManager
  patchObj := &unstructured.Unstructured{Object: map[string]interface{}{}}

  if err := yaml.Unmarshal(patch, &patchObj.Object); err != nil {
- return nil, fmt.Errorf("error decoding YAML: %v", err)
+ return nil, errors.NewBadRequest(fmt.Sprintf("error decoding YAML: %v", err))
  }

  if patchObj.GetManagedFields() != nil {
- return nil, fmt.Errorf("managed fields must be nil but was %v", patchObj.GetManagedFields())
+ return nil, errors.NewBadRequest(fmt.Sprintf("metadata.managedFields must be nil"))
  }

  if patchObj.GetAPIVersion() != f.groupVersion.String() {

vendor/k8s.io/apiserver/pkg/endpoints/handlers/patch.go (generated, vendored) — 18 lines changed

@@ -337,6 +337,15 @@ func (p *jsonPatcher) createNewObject() (runtime.Object, error) {
  func (p *jsonPatcher) applyJSPatch(versionedJS []byte) (patchedJS []byte, retErr error) {
  switch p.patchType {
  case types.JSONPatchType:
+ // sanity check potentially abusive patches
+ // TODO(liggitt): drop this once golang json parser limits stack depth (https://github.com/golang/go/issues/31789)
+ if len(p.patchBytes) > 1024*1024 {
+ v := []interface{}{}
+ if err := json.Unmarshal(p.patchBytes, &v); err != nil {
+ return nil, errors.NewBadRequest(fmt.Sprintf("error decoding patch: %v", err))
+ }
+ }
+
  patchObj, err := jsonpatch.DecodePatch(p.patchBytes)
  if err != nil {
  return nil, errors.NewBadRequest(err.Error())

@@ -352,6 +361,15 @@ func (p *jsonPatcher) applyJSPatch(versionedJS []byte) (patchedJS []byte, retErr
  }
  return patchedJS, nil
  case types.MergePatchType:
+ // sanity check potentially abusive patches
+ // TODO(liggitt): drop this once golang json parser limits stack depth (https://github.com/golang/go/issues/31789)
+ if len(p.patchBytes) > 1024*1024 {
+ v := map[string]interface{}{}
+ if err := json.Unmarshal(p.patchBytes, &v); err != nil {
+ return nil, errors.NewBadRequest(fmt.Sprintf("error decoding patch: %v", err))
+ }
+ }
+
  return jsonpatch.MergePatch(versionedJS, p.patchBytes)
  default:
  // only here as a safety net - go-restful filters content-type

vendor/k8s.io/apiserver/pkg/server/config.go (generated, vendored) — 31 lines changed

@@ -180,7 +180,7 @@ type Config struct {
  // patch may cause.
  // This affects all places that applies json patch in the binary.
  JSONPatchMaxCopyBytes int64
- // The limit on the request body size that would be accepted and decoded in a write request.
+ // The limit on the request size that would be accepted and decoded in a write request
  // 0 means no limit.
  MaxRequestBodyBytes int64
  // MaxRequestsInFlight is the maximum number of parallel non-long-running requests. Every further

@@ -297,22 +297,20 @@ func NewConfig(codecs serializer.CodecFactory) *Config {
  MinRequestTimeout: 1800,
  LivezGracePeriod: time.Duration(0),
  ShutdownDelayDuration: time.Duration(0),
- // 10MB is the recommended maximum client request size in bytes
+ // 1.5MB is the default client request size in bytes
  // the etcd server should accept. See
- // https://github.com/etcd-io/etcd/blob/release-3.3/etcdserver/server.go#L90.
+ // https://github.com/etcd-io/etcd/blob/release-3.4/embed/config.go#L56.
  // A request body might be encoded in json, and is converted to
- // proto when persisted in etcd. Assuming the upper bound of
- // the size ratio is 10:1, we set 100MB as the largest size
+ // proto when persisted in etcd, so we allow 2x as the largest size
  // increase the "copy" operations in a json patch may cause.
- JSONPatchMaxCopyBytes: int64(100 * 1024 * 1024),
- // 10MB is the recommended maximum client request size in bytes
+ JSONPatchMaxCopyBytes: int64(3 * 1024 * 1024),
+ // 1.5MB is the recommended client request size in byte
  // the etcd server should accept. See
- // https://github.com/etcd-io/etcd/blob/release-3.3/etcdserver/server.go#L90.
+ // https://github.com/etcd-io/etcd/blob/release-3.4/embed/config.go#L56.
  // A request body might be encoded in json, and is converted to
- // proto when persisted in etcd. Assuming the upper bound of
- // the size ratio is 10:1, we set 100MB as the largest request
+ // proto when persisted in etcd, so we allow 2x as the largest request
  // body size to be accepted and decoded in a write request.
- MaxRequestBodyBytes: int64(100 * 1024 * 1024),
+ MaxRequestBodyBytes: int64(3 * 1024 * 1024),

  // Default to treating watch as a long-running operation
  // Generic API servers have no inherent long-running subresources

@@ -382,6 +380,17 @@ type CompletedConfig struct {
  *completedConfig
  }

+ // AddHealthChecks adds a health check to our config to be exposed by the health endpoints
+ // of our configured apiserver. We should prefer this to adding healthChecks directly to
+ // the config unless we explicitly want to add a healthcheck only to a specific health endpoint.
+ func (c *Config) AddHealthChecks(healthChecks ...healthz.HealthChecker) {
+ for _, check := range healthChecks {
+ c.HealthzChecks = append(c.HealthzChecks, check)
+ c.LivezChecks = append(c.LivezChecks, check)
+ c.ReadyzChecks = append(c.ReadyzChecks, check)
+ }
+ }
+
  // Complete fills in any fields not set that are required to have valid data and can be derived
  // from other fields. If you're going to `ApplyOptions`, do that first. It's mutating the receiver.
  func (c *Config) Complete(informers informers.SharedInformerFactory) CompletedConfig {

vendor/k8s.io/apiserver/pkg/server/config_selfclient.go (generated, vendored) — 9 lines changed

@@ -38,12 +38,9 @@ func (s *SecureServingInfo) NewClientConfig(caCert []byte) (*restclient.Config,
  }

  return &restclient.Config{
- // Increase QPS limits. The client is currently passed to all admission plugins,
- // and those can be throttled in case of higher load on apiserver - see #22340 and #22422
- // for more details. Once #22422 is fixed, we may want to remove it.
- QPS: 50,
- Burst: 100,
- Host: "https://" + net.JoinHostPort(host, port),
+ // Do not limit loopback client QPS.
+ QPS: -1,
+ Host: "https://" + net.JoinHostPort(host, port),
  // override the ServerName to select our loopback certificate via SNI. This name is also
  // used by the client to compare the returns server certificate against.
  TLSClientConfig: restclient.TLSClientConfig{

vendor/k8s.io/apiserver/pkg/server/filters/timeout.go (generated, vendored) — 18 lines changed

@@ -28,6 +28,7 @@ import (
  "time"

  apierrors "k8s.io/apimachinery/pkg/api/errors"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
  "k8s.io/apiserver/pkg/endpoints/metrics"
  apirequest "k8s.io/apiserver/pkg/endpoints/request"
  )

@@ -119,6 +120,23 @@ func (t *timeoutHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
  }
  return
  case <-after:
+ defer func() {
+ // resultCh needs to have a reader, since the function doing
+ // the work needs to send to it. This is defer'd to ensure it runs
+ // even if the post timeout work itself panics.
+ go func() {
+ res := <-resultCh
+ if res != nil {
+ switch t := res.(type) {
+ case error:
+ utilruntime.HandleError(t)
+ default:
+ utilruntime.HandleError(fmt.Errorf("%v", res))
+ }
+ }
+ }()
+ }()
+
  postTimeoutFn()
  tw.timeout(err)
  }

vendor/k8s.io/apiserver/pkg/server/options/etcd.go (generated, vendored) — 5 lines changed

@@ -202,7 +202,7 @@ func (s *EtcdOptions) addEtcdHealthEndpoint(c *server.Config) error {
  if err != nil {
  return err
  }
- c.HealthzChecks = append(c.HealthzChecks, healthz.NamedCheck("etcd", func(r *http.Request) error {
+ c.AddHealthChecks(healthz.NamedCheck("etcd", func(r *http.Request) error {
  return healthCheck()
  }))

@@ -211,8 +211,7 @@ func (s *EtcdOptions) addEtcdHealthEndpoint(c *server.Config) error {
  if err != nil {
  return err
  }

- c.HealthzChecks = append(c.HealthzChecks, kmsPluginHealthzChecks...)
+ c.AddHealthChecks(kmsPluginHealthzChecks...)
  }

  return nil

vendor/k8s.io/apiserver/pkg/storage/value/BUILD (generated, vendored) — 1 line changed

@@ -30,6 +30,7 @@ go_library(
  importmap = "k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/storage/value",
  importpath = "k8s.io/apiserver/pkg/storage/value",
  deps = [
+ "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
  "//staging/src/k8s.io/component-base/metrics:go_default_library",
  "//staging/src/k8s.io/component-base/metrics/legacyregistry:go_default_library",
  "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",

vendor/k8s.io/apiserver/pkg/storage/value/transformer.go (generated, vendored) — 42 lines changed

@@ -22,6 +22,8 @@ import (
  "fmt"
  "sync"
  "time"
+
+ "k8s.io/apimachinery/pkg/util/errors"
  )

  func init() {

@@ -129,6 +131,7 @@ func NewPrefixTransformers(err error, transformers ...PrefixTransformer) Transfo
  // the first transformer.
  func (t *prefixTransformers) TransformFromStorage(data []byte, context Context) ([]byte, bool, error) {
  start := time.Now()
+ var errs []error
  for i, transformer := range t.transformers {
  if bytes.HasPrefix(data, transformer.Prefix) {
  result, stale, err := transformer.Transformer.TransformFromStorage(data[len(transformer.Prefix):], context)

@@ -144,9 +147,48 @@ func (t *prefixTransformers) TransformFromStorage(data []byte, context Context)
  } else {
  RecordTransformation("from_storage", string(transformer.Prefix), start, err)
  }
+
+ // It is valid to have overlapping prefixes when the same encryption provider
+ // is specified multiple times but with different keys (the first provider is
+ // being rotated to and some later provider is being rotated away from).
+ //
+ // Example:
+ //
+ // {
+ // "aescbc": {
+ // "keys": [
+ // {
+ // "name": "2",
+ // "secret": "some key 2"
+ // }
+ // ]
+ // }
+ // },
+ // {
+ // "aescbc": {
+ // "keys": [
+ // {
+ // "name": "1",
+ // "secret": "some key 1"
+ // }
+ // ]
+ // }
+ // },
+ //
+ // The transformers for both aescbc configs share the prefix k8s:enc:aescbc:v1:
+ // but a failure in the first one should not prevent a later match from being attempted.
+ // Thus we never short-circuit on a prefix match that results in an error.
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+
  return result, stale || i != 0, err
  }
  }
+ if err := errors.Reduce(errors.NewAggregate(errs)); err != nil {
+ return nil, false, err
+ }
  RecordTransformation("from_storage", "unknown", start, t.err)
  return nil, false, t.err
  }

vendor/k8s.io/client-go/pkg/version/base.go (generated, vendored) — 6 lines changed

@@ -3,8 +3,8 @@ package version
  var (
  gitMajor = "1"
  gitMinor = "16"
- gitVersion = "v1.16.0-k3s.1"
- gitCommit = "6cded9539b673ea7c5467303ddb3ad5628f9bb8a"
+ gitVersion = "v1.16.2-k3s.1"
+ gitCommit = "b8b17ba55f20e590df507fce333dfee13ab438c6"
  gitTreeState = "clean"
- buildDate = "2019-09-27T21:33Z"
+ buildDate = "2019-10-16T05:17Z"
  )

vendor/k8s.io/client-go/tools/cache/shared_informer.go (generated, vendored) — 2 lines changed

@@ -209,7 +209,7 @@ func WaitForNamedCacheSync(controllerName string, stopCh <-chan struct{}, cacheS
  // if the controller should shutdown
  // callers should prefer WaitForNamedCacheSync()
  func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool {
- err := wait.PollUntil(syncedPollPeriod,
+ err := wait.PollImmediateUntil(syncedPollPeriod,
  func() (bool, error) {
  for _, syncFunc := range cacheSyncs {
  if !syncFunc() {

vendor/k8s.io/cloud-provider/go.sum (generated, vendored) — 2 lines changed

@@ -158,6 +158,8 @@ gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
  gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
  gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
  gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+ gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
+ gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
  honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
  honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
  k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=

vendor/k8s.io/component-base/version/base.go (generated, vendored) — 6 lines changed

@@ -3,8 +3,8 @@ package version
  var (
  gitMajor = "1"
  gitMinor = "16"
- gitVersion = "v1.16.0-k3s.1"
- gitCommit = "6cded9539b673ea7c5467303ddb3ad5628f9bb8a"
+ gitVersion = "v1.16.2-k3s.1"
+ gitCommit = "b8b17ba55f20e590df507fce333dfee13ab438c6"
  gitTreeState = "clean"
- buildDate = "2019-09-27T21:33Z"
+ buildDate = "2019-10-16T05:17Z"
  )

vendor/k8s.io/csi-translation-lib/go.sum (generated, vendored) — 2 lines changed

@@ -139,6 +139,8 @@ gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
  gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
  gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
  gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+ gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
+ gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
  honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
  honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
  k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=

vendor/k8s.io/kubectl/pkg/version/base.go (generated, vendored) — 6 lines changed

@@ -3,8 +3,8 @@ package version
  var (
  gitMajor = "1"
  gitMinor = "16"
- gitVersion = "v1.16.0-k3s.1"
- gitCommit = "6cded9539b673ea7c5467303ddb3ad5628f9bb8a"
+ gitVersion = "v1.16.2-k3s.1"
+ gitCommit = "b8b17ba55f20e590df507fce333dfee13ab438c6"
  gitTreeState = "clean"
- buildDate = "2019-09-27T21:33Z"
+ buildDate = "2019-10-16T05:17Z"
  )

vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go (generated, vendored) — 16 lines changed

@@ -585,14 +585,6 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan
  }
  }

- // If the kubelet config controller is available, and dynamic config is enabled, start the config and status sync loops
- if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) && len(s.DynamicConfigDir.Value()) > 0 &&
- kubeDeps.KubeletConfigController != nil && !standaloneMode && !s.RunOnce {
- if err := kubeDeps.KubeletConfigController.StartSync(kubeDeps.KubeClient, kubeDeps.EventClient, string(nodeName)); err != nil {
- return err
- }
- }
-
  if kubeDeps.Auth == nil {
  auth, err := BuildAuth(nodeName, kubeDeps.KubeClient, s.KubeletConfiguration)
  if err != nil {

@@ -722,6 +714,14 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan
  return err
  }

+ // If the kubelet config controller is available, and dynamic config is enabled, start the config and status sync loops
+ if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) && len(s.DynamicConfigDir.Value()) > 0 &&
+ kubeDeps.KubeletConfigController != nil && !standaloneMode && !s.RunOnce {
+ if err := kubeDeps.KubeletConfigController.StartSync(kubeDeps.KubeClient, kubeDeps.EventClient, string(nodeName)); err != nil {
+ return err
+ }
+ }
+
  if s.HealthzPort > 0 {
  mux := http.NewServeMux()
  healthz.InstallHandler(mux)

vendor/k8s.io/kubernetes/pkg/controller/bootstrap/tokencleaner.go (generated, vendored) — 7 lines changed

@@ -202,6 +202,11 @@ func (tc *TokenCleaner) evalSecret(o interface{}) {
  klog.V(3).Infof("Error deleting Secret: %v", err)
  }
  } else if ttl > 0 {
- tc.queue.AddAfter(o, ttl)
+ key, err := controller.KeyFunc(o)
+ if err != nil {
+ utilruntime.HandleError(err)
+ return
+ }
+ tc.queue.AddAfter(key, ttl)
  }
  }

vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/topology.go (generated, vendored) — 2 lines changed

@@ -293,7 +293,7 @@ func GetNUMANodeInfo() (NUMANodeInfo, error) {
  // nil NUMANodeInfo, indicating that no NUMA information is available
  // on this machine. This should implicitly be interpreted as having a
  // single NUMA node with id 0 for all CPUs.
- nodelist, err := ioutil.ReadFile("/sys/devices/system/node/possible")
+ nodelist, err := ioutil.ReadFile("/sys/devices/system/node/online")
  if err != nil {
  return nil, nil
  }

vendor/k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/topology_manager.go (generated, vendored) — 29 lines changed

@@ -192,7 +192,7 @@ func (m *manager) calculateAffinity(pod v1.Pod, container v1.Container) Topology
  // If hints is nil, insert a single, preferred any-numa hint into allProviderHints.
  if len(hints) == 0 {
  klog.Infof("[topologymanager] Hint Provider has no preference for NUMA affinity with any resource")
- allProviderHints = append(allProviderHints, []TopologyHint{{defaultAffinity, true}})
+ allProviderHints = append(allProviderHints, []TopologyHint{{nil, true}})
  continue
  }

@@ -200,13 +200,13 @@ func (m *manager) calculateAffinity(pod v1.Pod, container v1.Container) Topology
  for resource := range hints {
  if hints[resource] == nil {
  klog.Infof("[topologymanager] Hint Provider has no preference for NUMA affinity with resource '%s'", resource)
- allProviderHints = append(allProviderHints, []TopologyHint{{defaultAffinity, true}})
+ allProviderHints = append(allProviderHints, []TopologyHint{{nil, true}})
  continue
  }

  if len(hints[resource]) == 0 {
  klog.Infof("[topologymanager] Hint Provider has no possible NUMA affinities for resource '%s'", resource)
- allProviderHints = append(allProviderHints, []TopologyHint{{defaultAffinity, false}})
+ allProviderHints = append(allProviderHints, []TopologyHint{{nil, false}})
  continue
  }

@@ -226,18 +226,21 @@ func (m *manager) calculateAffinity(pod v1.Pod, container v1.Container) Topology
  preferred := true
  var numaAffinities []socketmask.SocketMask
  for _, hint := range permutation {
- // Only consider hints that have an actual NUMANodeAffinity set.
- if hint.NUMANodeAffinity != nil {
- if !hint.Preferred {
- preferred = false
- }
- // Special case PolicySingleNumaNode to only prefer hints where
- // all providers have a single NUMA affinity set.
- if m.policy != nil && m.policy.Name() == PolicySingleNumaNode && hint.NUMANodeAffinity.Count() > 1 {
- preferred = false
- }
+ if hint.NUMANodeAffinity == nil {
+ numaAffinities = append(numaAffinities, defaultAffinity)
+ } else {
+ numaAffinities = append(numaAffinities, hint.NUMANodeAffinity)
+ }
+
+ if !hint.Preferred {
+ preferred = false
+ }
+
+ // Special case PolicySingleNumaNode to only prefer hints where
+ // all providers have a single NUMA affinity set.
+ if m.policy != nil && m.policy.Name() == PolicySingleNumaNode && hint.NUMANodeAffinity != nil && hint.NUMANodeAffinity.Count() > 1 {
+ preferred = false
+ }
  }

  // Merge the affinities using a bitwise-and operation.

vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_streaming.go (generated, vendored) — 6 lines changed

@@ -42,6 +42,8 @@ type streamingRuntime struct {

  var _ streaming.Runtime = &streamingRuntime{}

+ const maxMsgSize = 1024 * 1024 * 16
+
  func (r *streamingRuntime) Exec(containerID string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
  return r.exec(containerID, cmd, in, out, err, tty, resize, 0)
  }

@@ -78,8 +80,8 @@ func (ds *dockerService) ExecSync(_ context.Context, req *runtimeapi.ExecSyncReq
  var stdoutBuffer, stderrBuffer bytes.Buffer
  err := ds.streamingRuntime.exec(req.ContainerId, req.Cmd,
  nil, // in
- ioutils.WriteCloserWrapper(&stdoutBuffer),
- ioutils.WriteCloserWrapper(&stderrBuffer),
+ ioutils.WriteCloserWrapper(ioutils.LimitWriter(&stdoutBuffer, maxMsgSize)),
+ ioutils.WriteCloserWrapper(ioutils.LimitWriter(&stderrBuffer, maxMsgSize)),
  false, // tty
  nil, // resize
  timeout)

vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_streaming_windows.go (generated, vendored) — 2 lines changed

@@ -28,7 +28,7 @@ import (

  func (r *streamingRuntime) portForward(podSandboxID string, port int32, stream io.ReadWriteCloser) error {
  stderr := new(bytes.Buffer)
- err := r.exec(podSandboxID, []string{"wincat.exe", "localhost", fmt.Sprint(port)}, stream, stream, ioutils.WriteCloserWrapper(stderr), false, nil, 0)
+ err := r.exec(podSandboxID, []string{"wincat.exe", "127.0.0.1", fmt.Sprint(port)}, stream, stream, ioutils.WriteCloserWrapper(stderr), false, nil, 0)
  if err != nil {
  return fmt.Errorf("%v: %s", err, stderr.String())
  }

vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go (generated, vendored) — 30 lines changed

@@ -80,10 +80,7 @@ const (
  "ipam": {
  "type": "host-local",
  "ranges": [%s],
- "routes": [
- { "dst": "%s" },
- { "dst": "%s" }
- ]
+ "routes": [%s]
  }
  }`
  )

@@ -283,7 +280,7 @@ func (plugin *kubenetNetworkPlugin) Event(name string, details map[string]interf
  //setup hairpinMode
  setHairpin := plugin.hairpinMode == kubeletconfig.HairpinVeth

- json := fmt.Sprintf(NET_CONFIG_TEMPLATE, BridgeName, plugin.mtu, network.DefaultInterfaceName, setHairpin, plugin.getRangesConfig(), zeroCIDRv4, zeroCIDRv6)
+ json := fmt.Sprintf(NET_CONFIG_TEMPLATE, BridgeName, plugin.mtu, network.DefaultInterfaceName, setHairpin, plugin.getRangesConfig(), plugin.getRoutesConfig())
  klog.V(4).Infof("CNI network config set to %v", json)
  plugin.netConfig, err = libcni.ConfFromBytes([]byte(json))
  if err != nil {

@@ -844,6 +841,29 @@ func (plugin *kubenetNetworkPlugin) getRangesConfig() string {
  return strings.Join(ranges[:], ",")
  }

+ // given a n cidrs assigned to nodes,
+ // create bridge routes configuration that conforms to them
+ func (plugin *kubenetNetworkPlugin) getRoutesConfig() string {
+ var (
+ routes []string
+ hasV4, hasV6 bool
+ )
+ for _, thisCIDR := range plugin.podCIDRs {
+ if thisCIDR.IP.To4() != nil {
+ hasV4 = true
+ } else {
+ hasV6 = true
+ }
+ }
+ if hasV4 {
+ routes = append(routes, fmt.Sprintf(`{"dst": "%s"}`, zeroCIDRv4))
+ }
+ if hasV6 {
+ routes = append(routes, fmt.Sprintf(`{"dst": "%s"}`, zeroCIDRv6))
+ }
+ return strings.Join(routes, ",")
+ }
+
  func (plugin *kubenetNetworkPlugin) addPodIP(id kubecontainer.ContainerID, ip string) {
  plugin.mu.Lock()
  defer plugin.mu.Unlock()
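The getRoutesConfig helper added above emits one default-route entry per IP family actually present in the node's pod CIDRs, instead of always rendering both IPv4 and IPv6 routes into the CNI config. A standalone sketch (not part of this commit) of the same logic with illustrative CIDR values; the zeroCIDRv4/zeroCIDRv6 constants are assumed to match the package's values.

package main

import (
	"fmt"
	"net"
	"strings"
)

const (
	zeroCIDRv4 = "0.0.0.0/0"
	zeroCIDRv6 = "::/0"
)

// routesConfig mirrors the per-family route selection in getRoutesConfig.
func routesConfig(podCIDRs []*net.IPNet) string {
	var routes []string
	hasV4, hasV6 := false, false
	for _, c := range podCIDRs {
		if c.IP.To4() != nil {
			hasV4 = true
		} else {
			hasV6 = true
		}
	}
	if hasV4 {
		routes = append(routes, fmt.Sprintf(`{"dst": "%s"}`, zeroCIDRv4))
	}
	if hasV6 {
		routes = append(routes, fmt.Sprintf(`{"dst": "%s"}`, zeroCIDRv6))
	}
	return strings.Join(routes, ",")
}

func main() {
	_, v4, _ := net.ParseCIDR("10.42.0.0/24")
	_, v6, _ := net.ParseCIDR("fd00:42::/64")

	fmt.Println(routesConfig([]*net.IPNet{v4}))     // IPv4-only node: one route
	fmt.Println(routesConfig([]*net.IPNet{v4, v6})) // dual-stack node: both routes
}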

vendor/k8s.io/kubernetes/pkg/kubelet/prober/BUILD (generated, vendored) — 1 line changed

@@ -58,6 +58,7 @@ go_test(
  "//pkg/kubelet/prober/results:go_default_library",
  "//pkg/kubelet/status:go_default_library",
  "//pkg/kubelet/status/testing:go_default_library",
+ "//pkg/kubelet/util/ioutils:go_default_library",
  "//pkg/probe:go_default_library",
  "//pkg/probe/exec:go_default_library",
  "//staging/src/k8s.io/api/core/v1:go_default_library",

vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober.go (generated, vendored) — 53 lines changed

@@ -252,63 +252,68 @@ func formatURL(scheme string, host string, port int, path string) *url.URL {
  type execInContainer struct {
  // run executes a command in a container. Combined stdout and stderr output is always returned. An
  // error is returned if one occurred.
- run func() ([]byte, error)
+ run func() ([]byte, error)
+ writer io.Writer
  }

  func (pb *prober) newExecInContainer(container v1.Container, containerID kubecontainer.ContainerID, cmd []string, timeout time.Duration) exec.Cmd {
- return execInContainer{func() ([]byte, error) {
+ return &execInContainer{run: func() ([]byte, error) {
  return pb.runner.RunInContainer(containerID, cmd, timeout)
  }}
  }

- func (eic execInContainer) Run() error {
- return fmt.Errorf("unimplemented")
+ func (eic *execInContainer) Run() error {
+ return nil
  }

- func (eic execInContainer) CombinedOutput() ([]byte, error) {
+ func (eic *execInContainer) CombinedOutput() ([]byte, error) {
  return eic.run()
  }

- func (eic execInContainer) Output() ([]byte, error) {
+ func (eic *execInContainer) Output() ([]byte, error) {
  return nil, fmt.Errorf("unimplemented")
  }

- func (eic execInContainer) SetDir(dir string) {
+ func (eic *execInContainer) SetDir(dir string) {
  //unimplemented
  }

- func (eic execInContainer) SetStdin(in io.Reader) {
+ func (eic *execInContainer) SetStdin(in io.Reader) {
  //unimplemented
  }

- func (eic execInContainer) SetStdout(out io.Writer) {
+ func (eic *execInContainer) SetStdout(out io.Writer) {
+ eic.writer = out
  }

+ func (eic *execInContainer) SetStderr(out io.Writer) {
+ eic.writer = out
+ }
+
+ func (eic *execInContainer) SetEnv(env []string) {
  //unimplemented
  }

- func (eic execInContainer) SetStderr(out io.Writer) {
+ func (eic *execInContainer) Stop() {
  //unimplemented
  }

- func (eic execInContainer) SetEnv(env []string) {
- //unimplemented
+ func (eic *execInContainer) Start() error {
+ data, err := eic.run()
+ if eic.writer != nil {
+ eic.writer.Write(data)
+ }
+ return err
  }

- func (eic execInContainer) Stop() {
- //unimplemented
+ func (eic *execInContainer) Wait() error {
+ return nil
  }

- func (eic execInContainer) Start() error {
- return fmt.Errorf("unimplemented")
- }
-
- func (eic execInContainer) Wait() error {
- return fmt.Errorf("unimplemented")
- }
-
- func (eic execInContainer) StdoutPipe() (io.ReadCloser, error) {
+ func (eic *execInContainer) StdoutPipe() (io.ReadCloser, error) {
  return nil, fmt.Errorf("unimplemented")
  }

- func (eic execInContainer) StderrPipe() (io.ReadCloser, error) {
+ func (eic *execInContainer) StderrPipe() (io.ReadCloser, error) {
  return nil, fmt.Errorf("unimplemented")
  }

vendor/k8s.io/kubernetes/pkg/kubelet/util/ioutils/BUILD (generated, vendored) — 12 lines changed

@@ -1,9 +1,6 @@
  package(default_visibility = ["//visibility:public"])

- load(
- "@io_bazel_rules_go//go:def.bzl",
- "go_library",
- )
+ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

  go_library(
  name = "go_default_library",

@@ -23,3 +20,10 @@ filegroup(
  srcs = [":package-srcs"],
  tags = ["automanaged"],
  )
+
+ go_test(
+ name = "go_default_test",
+ srcs = ["ioutils_test.go"],
+ embed = [":go_default_library"],
+ deps = ["//vendor/github.com/stretchr/testify/assert:go_default_library"],
+ )

vendor/k8s.io/kubernetes/pkg/kubelet/util/ioutils/ioutils.go (generated, vendored) — 33 lines changed

@@ -35,3 +35,36 @@ func (w *writeCloserWrapper) Close() error {
  func WriteCloserWrapper(w io.Writer) io.WriteCloser {
  return &writeCloserWrapper{w}
  }
+
+ // LimitWriter is a copy of the standard library ioutils.LimitReader,
+ // applied to the writer interface.
+ // LimitWriter returns a Writer that writes to w
+ // but stops with EOF after n bytes.
+ // The underlying implementation is a *LimitedWriter.
+ func LimitWriter(w io.Writer, n int64) io.Writer { return &LimitedWriter{w, n} }
+
+ // A LimitedWriter writes to W but limits the amount of
+ // data returned to just N bytes. Each call to Write
+ // updates N to reflect the new amount remaining.
+ // Write returns EOF when N <= 0 or when the underlying W returns EOF.
+ type LimitedWriter struct {
+ W io.Writer // underlying writer
+ N int64 // max bytes remaining
+ }
+
+ func (l *LimitedWriter) Write(p []byte) (n int, err error) {
+ if l.N <= 0 {
+ return 0, io.ErrShortWrite
+ }
+ truncated := false
+ if int64(len(p)) > l.N {
+ p = p[0:l.N]
+ truncated = true
+ }
+ n, err = l.W.Write(p)
+ l.N -= int64(n)
+ if err == nil && truncated {
+ err = io.ErrShortWrite
+ }
+ return
+ }
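A short usage sketch (not part of this commit) for the LimitedWriter added above: writes beyond the budget are truncated and reported as io.ErrShortWrite, which is how callers such as the exec prober cap how much probe output they retain. The import path assumes the vendored package shown in this diff.

package main

import (
	"bytes"
	"fmt"

	"k8s.io/kubernetes/pkg/kubelet/util/ioutils"
)

func main() {
	var buf bytes.Buffer
	w := ioutils.LimitWriter(&buf, 10) // keep at most 10 bytes

	n, err := w.Write([]byte("hello, world"))
	fmt.Println(n, err)       // 10 short write
	fmt.Println(buf.String()) // hello, wor
}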

vendor/k8s.io/kubernetes/pkg/probe/exec/BUILD (generated, vendored) — 1 line changed

@@ -11,6 +11,7 @@ go_library(
  srcs = ["exec.go"],
  importpath = "k8s.io/kubernetes/pkg/probe/exec",
  deps = [
+ "//pkg/kubelet/util/ioutils:go_default_library",
  "//pkg/probe:go_default_library",
  "//vendor/k8s.io/klog:go_default_library",
  "//vendor/k8s.io/utils/exec:go_default_library",

vendor/k8s.io/kubernetes/pkg/probe/exec/exec.go (generated, vendored) — 21 lines changed

@@ -17,10 +17,17 @@ limitations under the License.
  package exec

  import (
+ "bytes"
+
+ "k8s.io/kubernetes/pkg/kubelet/util/ioutils"
  "k8s.io/kubernetes/pkg/probe"
- "k8s.io/utils/exec"

  "k8s.io/klog"
+ "k8s.io/utils/exec"
  )

+ const (
+ maxReadLength = 10 * 1 << 10 // 10KB
+ )
+
  // New creates a Prober.

@@ -39,7 +46,17 @@ type execProber struct{}
  // from executing a command. Returns the Result status, command output, and
  // errors if any.
  func (pr execProber) Probe(e exec.Cmd) (probe.Result, string, error) {
- data, err := e.CombinedOutput()
+ var dataBuffer bytes.Buffer
+ writer := ioutils.LimitWriter(&dataBuffer, maxReadLength)
+
+ e.SetStderr(writer)
+ e.SetStdout(writer)
+ err := e.Start()
+ if err == nil {
+ err = e.Wait()
+ }
+ data := dataBuffer.Bytes()
+
  klog.V(4).Infof("Exec probe response: %q", string(data))
  if err != nil {
  exit, ok := err.(exec.ExitError)
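The probe now collects output through a size-limited writer and drives the command with Start/Wait instead of CombinedOutput. A standalone sketch (not part of this commit) of that pattern, assuming the vendored ioutils package from this diff and the k8s.io/utils/exec Cmd interface; the shell command is illustrative.

package main

import (
	"bytes"
	"fmt"

	"k8s.io/kubernetes/pkg/kubelet/util/ioutils"
	"k8s.io/utils/exec"
)

func main() {
	var out bytes.Buffer
	// Cap captured output at 10KB, matching maxReadLength in the prober.
	writer := ioutils.LimitWriter(&out, 10*1<<10)

	cmd := exec.New().Command("sh", "-c", "echo probe-ok")
	cmd.SetStdout(writer)
	cmd.SetStderr(writer)

	// Same Start/Wait flow the prober now uses instead of CombinedOutput;
	// anything beyond the cap would simply not be retained.
	err := cmd.Start()
	if err == nil {
		err = cmd.Wait()
	}
	fmt.Printf("output=%q err=%v\n", out.String(), err)
}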

vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/interpod_affinity.go (generated, vendored) — 40 lines changed

@@ -51,17 +51,16 @@ func NewInterPodAffinityPriority(
  }

  type podAffinityPriorityMap struct {
- // nodes contain all nodes that should be considered
+ // nodes contain all nodes that should be considered.
  nodes []*v1.Node
- // counts store the mapping from node name to so-far computed score of
- // the node.
- counts map[string]*int64
+ // counts store the so-far computed score for each node.
+ counts []int64
  }

  func newPodAffinityPriorityMap(nodes []*v1.Node) *podAffinityPriorityMap {
  return &podAffinityPriorityMap{
  nodes: nodes,
- counts: make(map[string]*int64, len(nodes)),
+ counts: make([]int64, len(nodes)),
  }
  }

@@ -73,9 +72,9 @@ func (p *podAffinityPriorityMap) processTerm(term *v1.PodAffinityTerm, podDefini
  }
  match := priorityutil.PodMatchesTermsNamespaceAndSelector(podToCheck, namespaces, selector)
  if match {
- for _, node := range p.nodes {
+ for i, node := range p.nodes {
  if priorityutil.NodesHaveSameTopologyKey(node, fixedNode, term.TopologyKey) {
- atomic.AddInt64(p.counts[node.Name], weight)
+ atomic.AddInt64(&p.counts[i], weight)
  }
  }
  }

@@ -102,17 +101,11 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node
  hasAffinityConstraints := affinity != nil && affinity.PodAffinity != nil
  hasAntiAffinityConstraints := affinity != nil && affinity.PodAntiAffinity != nil

- // priorityMap stores the mapping from node name to so-far computed score of
- // the node.
+ // pm stores (1) all nodes that should be considered and (2) the so-far computed score for each node.
  pm := newPodAffinityPriorityMap(nodes)
  allNodeNames := make([]string, 0, len(nodeNameToInfo))
- lazyInit := hasAffinityConstraints || hasAntiAffinityConstraints
  for name := range nodeNameToInfo {
  allNodeNames = append(allNodeNames, name)
- // if pod has affinity defined, or target node has affinityPods
- if lazyInit || len(nodeNameToInfo[name].PodsWithAffinity()) != 0 {
- pm.counts[name] = new(int64)
- }
  }

  // convert the topology key based weights to the node name based weights

@@ -216,25 +209,22 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node
  return nil, err
  }

- for _, node := range nodes {
- if pm.counts[node.Name] == nil {
- continue
+ for i := range nodes {
+ if pm.counts[i] > maxCount {
+ maxCount = pm.counts[i]
  }
- if *pm.counts[node.Name] > maxCount {
- maxCount = *pm.counts[node.Name]
- }
- if *pm.counts[node.Name] < minCount {
- minCount = *pm.counts[node.Name]
+ if pm.counts[i] < minCount {
+ minCount = pm.counts[i]
  }
  }

  // calculate final priority score for each node
  result := make(schedulerapi.HostPriorityList, 0, len(nodes))
  maxMinDiff := maxCount - minCount
- for _, node := range nodes {
+ for i, node := range nodes {
  fScore := float64(0)
- if maxMinDiff > 0 && pm.counts[node.Name] != nil {
- fScore = float64(schedulerapi.MaxPriority) * (float64(*pm.counts[node.Name]-minCount) / float64(maxCount-minCount))
+ if maxMinDiff > 0 {
+ fScore = float64(schedulerapi.MaxPriority) * (float64(pm.counts[i]-minCount) / float64(maxCount-minCount))
  }
  result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int(fScore)})
  if klog.V(10) {
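The scoring change above replaces a map of per-node *int64 counters with an index-aligned []int64, updated with atomic.AddInt64 directly on the slice element, so the old nil checks go away. A tiny standalone sketch (not part of this commit) of that pattern with illustrative node names:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	nodes := []string{"node-a", "node-b", "node-c"}
	counts := make([]int64, len(nodes)) // counts[i] is the score of nodes[i]

	var wg sync.WaitGroup
	for i := range nodes {
		for w := 0; w < 4; w++ {
			wg.Add(1)
			go func(i int) {
				defer wg.Done()
				atomic.AddInt64(&counts[i], 1) // safe concurrent update, no pointer map needed
			}(i)
		}
	}
	wg.Wait()

	for i, n := range nodes {
		fmt.Printf("%s: %d\n", n, counts[i])
	}
}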

vendor/k8s.io/kubernetes/pkg/version/base.go (generated, vendored) — 6 lines changed

@@ -3,8 +3,8 @@ package version
  var (
  gitMajor = "1"
  gitMinor = "16"
- gitVersion = "v1.16.0-k3s.1"
- gitCommit = "6cded9539b673ea7c5467303ddb3ad5628f9bb8a"
+ gitVersion = "v1.16.2-k3s.1"
+ gitCommit = "b8b17ba55f20e590df507fce333dfee13ab438c6"
  gitTreeState = "clean"
- buildDate = "2019-09-27T21:33Z"
+ buildDate = "2019-10-16T05:17Z"
  )

vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go (generated, vendored) — 18 lines changed

@@ -150,17 +150,23 @@ func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI stri

  // DetachDisk detaches a disk from host. The vhd can be identified by diskName or diskURI.
  func (c *controllerCommon) DetachDisk(diskName, diskURI string, nodeName types.NodeName) error {
+ instanceid, err := c.cloud.InstanceID(context.TODO(), nodeName)
+ if err != nil {
+ if err == cloudprovider.InstanceNotFound {
+ // if host doesn't exist, no need to detach
+ klog.Warningf("azureDisk - failed to get azure instance id(%q), DetachDisk(%s) will assume disk is already detached",
+ nodeName, diskURI)
+ return nil
+ }
+ klog.Warningf("failed to get azure instance id (%v)", err)
+ return fmt.Errorf("failed to get azure instance id for node %q (%v)", nodeName, err)
+ }
+
  vmset, err := c.getNodeVMSet(nodeName)
  if err != nil {
  return err
  }

- instanceid, err := c.cloud.InstanceID(context.TODO(), nodeName)
- if err != nil {
- klog.Warningf("failed to get azure instance id (%v)", err)
- return fmt.Errorf("failed to get azure instance id for node %q (%v)", nodeName, err)
- }
-
  klog.V(2).Infof("detach %v from node %q", diskURI, nodeName)

  // make the lock here as small as possible
10
vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_vmss.go
generated
vendored
@ -86,8 +86,9 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod
defer cancel()

// Invalidate the cache right after updating
key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID))
defer ss.vmssVMCache.Delete(key)
if err = ss.deleteCacheForNode(vmName); err != nil {
return err
}

klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk(%s, %s)", nodeResourceGroup, nodeName, diskName, diskURI)
_, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM, "attach_disk")
@ -157,8 +158,9 @@ func (ss *scaleSet) DetachDisk(diskName, diskURI string, nodeName types.NodeName
defer cancel()

// Invalidate the cache right after updating
key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID))
defer ss.vmssVMCache.Delete(key)
if err = ss.deleteCacheForNode(vmName); err != nil {
return nil, err
}

klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%s, %s)", nodeResourceGroup, nodeName, diskName, diskURI)
return ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM, "detach_disk")
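
Both hunks replace the deferred, key-based cache delete with an explicit deleteCacheForNode call before the update, so a failed invalidation aborts the attach/detach instead of silently leaving a stale per-node entry. A rough sketch of that ordering, using a plain sync.Map as a stand-in for the vendored timedCache (invalidate and updateVM are illustrative names):

package main

import (
	"fmt"
	"sync"
)

// invalidate removes the cached entry for node; in the vendored code this is
// deleteCacheForNode, which can also fail if the shared cache cannot be loaded.
func invalidate(cache *sync.Map, node string) error {
	cache.Delete(node)
	return nil
}

// updateVM mirrors the new ordering: invalidate the per-node cache entry first,
// abort if that fails, then issue the API update.
func updateVM(cache *sync.Map, node string, update func() error) error {
	if err := invalidate(cache, node); err != nil {
		return err
	}
	return update()
}

func main() {
	cache := &sync.Map{}
	cache.Store("node-1", "stale-vm-state")
	err := updateVM(cache, "node-1", func() error {
		fmt.Println("updating scale set VM for node-1")
		return nil
	})
	fmt.Println("update error:", err)
}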
45
vendor/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go
generated
vendored
@ -104,18 +104,38 @@ const (
clusterNameKey = "kubernetes-cluster-name"
)

// GetLoadBalancer returns whether the specified load balancer exists, and
// GetLoadBalancer returns whether the specified load balancer and its components exist, and
// if so, what its status is.
func (az *Cloud) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error) {
_, status, exists, err = az.getServiceLoadBalancer(service, clusterName, nil, false)
// Since public IP is not a part of the load balancer on Azure,
// there is a chance that we could orphan public IP resources while we delete the load blanacer (kubernetes/kubernetes#80571).
// We need to make sure the existence of the load balancer depends on the load balancer resource and public IP resource on Azure.
existsPip := func() bool {
pipName, _, err := az.determinePublicIPName(clusterName, service)
if err != nil {
return false
}
pipResourceGroup := az.getPublicIPAddressResourceGroup(service)
_, existsPip, err := az.getPublicIPAddress(pipResourceGroup, pipName)
if err != nil {
return false
}
return existsPip
}()

_, status, existsLb, err := az.getServiceLoadBalancer(service, clusterName, nil, false)
if err != nil {
return nil, false, err
return nil, existsPip, err
}
if !exists {

// Return exists = false only if the load balancer and the public IP are not found on Azure
if !existsLb && !existsPip {
serviceName := getServiceName(service)
klog.V(5).Infof("getloadbalancer (cluster:%s) (service:%s) - doesn't exist", clusterName, serviceName)
return nil, false, nil
}

// Return exists = true if either the load balancer or the public IP (or both) exists
return status, true, nil
}

@ -169,6 +189,10 @@ func (az *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, ser

// UpdateLoadBalancer updates hosts under the specified load balancer.
func (az *Cloud) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) error {
if !az.shouldUpdateLoadBalancer(clusterName, service) {
klog.V(2).Infof("UpdateLoadBalancer: skipping service %s because it is either being deleted or does not exist anymore", service.Name)
return nil
}
_, err := az.EnsureLoadBalancer(ctx, clusterName, service, nodes)
return err
}
@ -475,7 +499,7 @@ func (az *Cloud) findServiceIPAddress(ctx context.Context, clusterName string, s
return service.Spec.LoadBalancerIP, nil
}

lbStatus, existsLb, err := az.GetLoadBalancer(ctx, clusterName, service)
_, lbStatus, existsLb, err := az.getServiceLoadBalancer(service, clusterName, nil, false)
if err != nil {
return "", err
}
@ -546,8 +570,12 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai
if ipv6 {
pip.PublicIPAddressVersion = network.IPv6
klog.V(2).Infof("service(%s): pip(%s) - creating as ipv6 for clusterIP:%v", serviceName, *pip.Name, service.Spec.ClusterIP)
// static allocation on IPv6 on Azure is not allowed

pip.PublicIPAddressPropertiesFormat.PublicIPAllocationMethod = network.Dynamic
if az.useStandardLoadBalancer() {
// standard sku must have static allocation method for ipv6
pip.PublicIPAddressPropertiesFormat.PublicIPAllocationMethod = network.Static
}
} else {
pip.PublicIPAddressVersion = network.IPv4
klog.V(2).Infof("service(%s): pip(%s) - creating as ipv4 for clusterIP:%v", serviceName, *pip.Name, service.Spec.ClusterIP)
@ -1279,6 +1307,11 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service,
return &sg, nil
}

func (az *Cloud) shouldUpdateLoadBalancer(clusterName string, service *v1.Service) bool {
_, _, existsLb, _ := az.getServiceLoadBalancer(service, clusterName, nil, false)
return existsLb && service.ObjectMeta.DeletionTimestamp == nil
}

func logSafe(s *string) string {
if s == nil {
return "(nil)"
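
The new GetLoadBalancer treats the service as existing when either the load balancer or its public IP is still present, so cleanup keeps being retried until both are gone and the public IP is not orphaned (kubernetes/kubernetes#80571). A tiny sketch of that existence rule (loadBalancerExists is an illustrative name, not the vendored API):

package main

import "fmt"

// loadBalancerExists mirrors the rule introduced above: report the load
// balancer as existing if either the LB resource or its public IP remains,
// so the controller keeps cleaning up until both are deleted.
func loadBalancerExists(existsLb, existsPip bool) bool {
	return existsLb || existsPip
}

func main() {
	fmt.Println(loadBalancerExists(false, true))  // true: an orphaned public IP still needs cleanup
	fmt.Println(loadBalancerExists(false, false)) // false: nothing left on Azure
}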
145
vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss.go
generated
vendored
@ -25,6 +25,7 @@ import (
"sort"
"strconv"
"strings"
"sync"

"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network"
@ -60,10 +61,8 @@ type scaleSet struct {
// (e.g. master nodes) may not belong to any scale sets.
availabilitySet VMSet

vmssCache *timedCache
vmssVMCache *timedCache
nodeNameToScaleSetMappingCache *timedCache
availabilitySetNodesCache *timedCache
vmssVMCache *timedCache
availabilitySetNodesCache *timedCache
}

// newScaleSet creates a new scaleSet.
@ -74,22 +73,12 @@ func newScaleSet(az *Cloud) (VMSet, error) {
availabilitySet: newAvailabilitySet(az),
}

ss.nodeNameToScaleSetMappingCache, err = ss.newNodeNameToScaleSetMappingCache()
if err != nil {
return nil, err
}

ss.availabilitySetNodesCache, err = ss.newAvailabilitySetNodesCache()
if err != nil {
return nil, err
}

ss.vmssCache, err = ss.newVmssCache()
if err != nil {
return nil, err
}

ss.vmssVMCache, err = ss.newVmssVMCache()
ss.vmssVMCache, err = ss.newVMSSVirtualMachinesCache()
if err != nil {
return nil, err
}
@ -99,39 +88,46 @@ func newScaleSet(az *Cloud) (VMSet, error) {

// getVmssVM gets virtualMachineScaleSetVM by nodeName from cache.
// It returns cloudprovider.InstanceNotFound if node does not belong to any scale sets.
func (ss *scaleSet) getVmssVM(nodeName string) (ssName, instanceID string, vm compute.VirtualMachineScaleSetVM, err error) {
instanceID, err = getScaleSetVMInstanceID(nodeName)
func (ss *scaleSet) getVmssVM(nodeName string) (string, string, *compute.VirtualMachineScaleSetVM, error) {
getter := func(nodeName string) (string, string, *compute.VirtualMachineScaleSetVM, error) {
cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey)
if err != nil {
return "", "", nil, err
}

virtualMachines := cached.(*sync.Map)
if vm, ok := virtualMachines.Load(nodeName); ok {
result := vm.(*vmssVirtualMachinesEntry)
return result.vmssName, result.instanceID, result.virtualMachine, nil
}

return "", "", nil, nil
}

_, err := getScaleSetVMInstanceID(nodeName)
if err != nil {
return ssName, instanceID, vm, err
return "", "", nil, err
}

ssName, err = ss.getScaleSetNameByNodeName(nodeName)
vmssName, instanceID, vm, err := getter(nodeName)
if err != nil {
return ssName, instanceID, vm, err
return "", "", nil, err
}
if vm != nil {
return vmssName, instanceID, vm, nil
}

if ssName == "" {
return "", "", vm, cloudprovider.InstanceNotFound
}

resourceGroup, err := ss.GetNodeResourceGroup(nodeName)
klog.V(3).Infof("Couldn't find VMSS VM with nodeName %s, refreshing the cache", nodeName)
ss.vmssVMCache.Delete(vmssVirtualMachinesKey)
vmssName, instanceID, vm, err = getter(nodeName)
if err != nil {
return "", "", vm, err
return "", "", nil, err
}

klog.V(4).Infof("getVmssVM gets scaleSetName (%q) and instanceID (%q) for node %q", ssName, instanceID, nodeName)
key := buildVmssCacheKey(resourceGroup, ss.makeVmssVMName(ssName, instanceID))
cachedVM, err := ss.vmssVMCache.Get(key)
if err != nil {
return ssName, instanceID, vm, err
if vm == nil {
return "", "", nil, cloudprovider.InstanceNotFound
}

if cachedVM == nil {
klog.Errorf("Can't find node (%q) in any scale sets", nodeName)
return ssName, instanceID, vm, cloudprovider.InstanceNotFound
}

return ssName, instanceID, *(cachedVM.(*compute.VirtualMachineScaleSetVM)), nil
return vmssName, instanceID, vm, nil
}

// GetPowerStatusByNodeName returns the power state of the specified node.
@ -158,20 +154,49 @@ func (ss *scaleSet) GetPowerStatusByNodeName(name string) (powerState string, er

// getCachedVirtualMachineByInstanceID gets scaleSetVMInfo from cache.
// The node must belong to one of scale sets.
func (ss *scaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID string) (vm compute.VirtualMachineScaleSetVM, err error) {
vmName := ss.makeVmssVMName(scaleSetName, instanceID)
key := buildVmssCacheKey(resourceGroup, vmName)
cachedVM, err := ss.vmssVMCache.Get(key)
func (ss *scaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID string) (*compute.VirtualMachineScaleSetVM, error) {
getter := func() (vm *compute.VirtualMachineScaleSetVM, found bool, err error) {
cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey)
if err != nil {
return nil, false, err
}

virtualMachines := cached.(*sync.Map)
virtualMachines.Range(func(key, value interface{}) bool {
vmEntry := value.(*vmssVirtualMachinesEntry)
if strings.EqualFold(vmEntry.resourceGroup, resourceGroup) &&
strings.EqualFold(vmEntry.vmssName, scaleSetName) &&
strings.EqualFold(vmEntry.instanceID, instanceID) {
vm = vmEntry.virtualMachine
found = true
return false
}

return true
})

return vm, found, nil
}

vm, found, err := getter()
if err != nil {
return vm, err
return nil, err
}
if found {
return vm, nil
}

if cachedVM == nil {
klog.Errorf("couldn't find vmss virtual machine by scaleSetName (%s) and instanceID (%s)", scaleSetName, instanceID)
return vm, cloudprovider.InstanceNotFound
klog.V(3).Infof("Couldn't find VMSS VM with scaleSetName %q and instanceID %q, refreshing the cache", scaleSetName, instanceID)
ss.vmssVMCache.Delete(vmssVirtualMachinesKey)
vm, found, err = getter()
if err != nil {
return nil, err
}
if !found {
return nil, cloudprovider.InstanceNotFound
}

return *(cachedVM.(*compute.VirtualMachineScaleSetVM)), nil
return vm, nil
}

// GetInstanceIDByNodeName gets the cloud provider ID by node name.
@ -463,9 +488,15 @@ func (ss *scaleSet) listScaleSets(resourceGroup string) ([]string, error) {
return nil, err
}

ssNames := make([]string, len(allScaleSets))
for i := range allScaleSets {
ssNames[i] = *(allScaleSets[i].Name)
ssNames := make([]string, 0)
for _, vmss := range allScaleSets {
name := *vmss.Name
if vmss.Sku != nil && to.Int64(vmss.Sku.Capacity) == 0 {
klog.V(3).Infof("Capacity of VMSS %q is 0, skipping", name)
continue
}

ssNames = append(ssNames, name)
}

return ssNames, nil
@ -500,7 +531,7 @@ func (ss *scaleSet) getAgentPoolScaleSets(nodes []*v1.Node) (*[]string, error) {
}

nodeName := nodes[nx].Name
ssName, err := ss.getScaleSetNameByNodeName(nodeName)
ssName, _, _, err := ss.getVmssVM(nodeName)
if err != nil {
return nil, err
}
@ -599,7 +630,7 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, err
return network.Interface{}, err
}

primaryInterfaceID, err := ss.getPrimaryInterfaceID(vm)
primaryInterfaceID, err := ss.getPrimaryInterfaceID(*vm)
if err != nil {
klog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getPrimaryInterfaceID(), err=%v", nodeName, err)
return network.Interface{}, err
@ -816,8 +847,9 @@ func (ss *scaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeNam
}

// Invalidate the cache since we would update it.
key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID))
defer ss.vmssVMCache.Delete(key)
if err = ss.deleteCacheForNode(vmName); err != nil {
return err
}

// Update vmssVM with backoff.
ctx, cancel := getContextWithCancel()
@ -1094,8 +1126,9 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromNode(service *v1.Service, nodeNa
}

// Invalidate the cache since we would update it.
key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID))
defer ss.vmssVMCache.Delete(key)
if err = ss.deleteCacheForNode(nodeName); err != nil {
return err
}

// Update vmssVM with backoff.
ctx, cancel := getContextWithCancel()
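
The getVmssVM and getVmssVMByInstanceID rewrites above switch from per-VM cache keys to one shared map of vmssVirtualMachinesEntry values and, on a miss, drop the shared entry and retry once before returning cloudprovider.InstanceNotFound. A simplified sketch of that lookup-then-refresh flow, assuming an in-memory rebuild function in place of the vendored timedCache getter (vmEntry, lookupWithRefresh and errNotFound are illustrative names):

package main

import (
	"errors"
	"fmt"
	"sync"
)

var errNotFound = errors.New("instance not found")

type vmEntry struct {
	vmssName   string
	instanceID string
}

// lookupWithRefresh sketches the cache-miss handling in getVmssVM: try the
// shared map, rebuild it once on a miss, and only then report not found.
func lookupWithRefresh(cache *sync.Map, rebuild func() *sync.Map, node string) (*vmEntry, error) {
	if v, ok := cache.Load(node); ok {
		return v.(*vmEntry), nil
	}
	// Miss: repopulate the whole map (the vendored code deletes the cache key,
	// which forces the timedCache getter to rebuild it on the next Get).
	fresh := rebuild()
	fresh.Range(func(k, v interface{}) bool {
		cache.Store(k, v)
		return true
	})
	if v, ok := cache.Load(node); ok {
		return v.(*vmEntry), nil
	}
	return nil, errNotFound
}

func main() {
	cache := &sync.Map{}
	rebuild := func() *sync.Map {
		m := &sync.Map{}
		m.Store("node-1", &vmEntry{vmssName: "vmss-a", instanceID: "3"})
		return m
	}
	vm, err := lookupWithRefresh(cache, rebuild, "node-1")
	fmt.Println(vm, err)
	_, err = lookupWithRefresh(cache, rebuild, "node-x")
	fmt.Println(err)
}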
176
vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go
generated
vendored
@ -21,8 +21,12 @@ package azure
import (
"fmt"
"strings"
"sync"
"time"

"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
"github.com/Azure/go-autorest/autorest/to"

"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog"
)
@ -31,18 +35,19 @@ var (
vmssNameSeparator = "_"
vmssCacheSeparator = "#"

nodeNameToScaleSetMappingKey = "k8sNodeNameToScaleSetMappingKey"
availabilitySetNodesKey = "k8sAvailabilitySetNodesKey"
vmssVirtualMachinesKey = "k8svmssVirtualMachinesKey"
availabilitySetNodesKey = "k8sAvailabilitySetNodesKey"

vmssCacheTTL = time.Minute
vmssVMCacheTTL = time.Minute
availabilitySetNodesCacheTTL = 5 * time.Minute
nodeNameToScaleSetMappingCacheTTL = 5 * time.Minute
availabilitySetNodesCacheTTL = 15 * time.Minute
vmssVirtualMachinesTTL = 10 * time.Minute
)

// nodeNameToScaleSetMapping maps nodeName to scaleSet name.
// The map is required because vmss nodeName is not equal to its vmName.
type nodeNameToScaleSetMapping map[string]string
type vmssVirtualMachinesEntry struct {
resourceGroup string
vmssName string
instanceID string
virtualMachine *compute.VirtualMachineScaleSetVM
}

func (ss *scaleSet) makeVmssVMName(scaleSetName, instanceID string) string {
return fmt.Sprintf("%s%s%s", scaleSetName, vmssNameSeparator, instanceID)
@ -62,32 +67,9 @@ func extractVmssVMName(name string) (string, string, error) {
return ssName, instanceID, nil
}

// vmssCache only holds vmss from ss.ResourceGroup because nodes from other resourceGroups
// will be excluded from LB backends.
func (ss *scaleSet) newVmssCache() (*timedCache, error) {
func (ss *scaleSet) newVMSSVirtualMachinesCache() (*timedCache, error) {
getter := func(key string) (interface{}, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
result, err := ss.VirtualMachineScaleSetsClient.Get(ctx, ss.ResourceGroup, key)
exists, message, realErr := checkResourceExistsFromError(err)
if realErr != nil {
return nil, realErr
}

if !exists {
klog.V(2).Infof("Virtual machine scale set %q not found with message: %q", key, message)
return nil, nil
}

return &result, nil
}

return newTimedcache(vmssCacheTTL, getter)
}

func (ss *scaleSet) newNodeNameToScaleSetMappingCache() (*timedCache, error) {
getter := func(key string) (interface{}, error) {
localCache := make(nodeNameToScaleSetMapping)
localCache := &sync.Map{} // [nodeName]*vmssVirtualMachinesEntry

allResourceGroups, err := ss.GetResourceGroups()
if err != nil {
@ -106,14 +88,20 @@ func (ss *scaleSet) newNodeNameToScaleSetMappingCache() (*timedCache, error) {
return nil, err
}

for _, vm := range vms {
for i := range vms {
vm := vms[i]
if vm.OsProfile == nil || vm.OsProfile.ComputerName == nil {
klog.Warningf("failed to get computerName for vmssVM (%q)", ssName)
continue
}

computerName := strings.ToLower(*vm.OsProfile.ComputerName)
localCache[computerName] = ssName
localCache.Store(computerName, &vmssVirtualMachinesEntry{
resourceGroup: resourceGroup,
vmssName: ssName,
instanceID: to.String(vm.InstanceID),
virtualMachine: &vm,
})
}
}
}
@ -121,7 +109,18 @@ func (ss *scaleSet) newNodeNameToScaleSetMappingCache() (*timedCache, error) {
return localCache, nil
}

return newTimedcache(nodeNameToScaleSetMappingCacheTTL, getter)
return newTimedcache(vmssVirtualMachinesTTL, getter)
}

func (ss *scaleSet) deleteCacheForNode(nodeName string) error {
cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey)
if err != nil {
return err
}

virtualMachines := cached.(*sync.Map)
virtualMachines.Delete(nodeName)
return nil
}

func (ss *scaleSet) newAvailabilitySetNodesCache() (*timedCache, error) {
@ -151,109 +150,6 @@ func (ss *scaleSet) newAvailabilitySetNodesCache() (*timedCache, error) {
return newTimedcache(availabilitySetNodesCacheTTL, getter)
}

func buildVmssCacheKey(resourceGroup, name string) string {
// key is composed of <resourceGroup>#<vmName>
return fmt.Sprintf("%s%s%s", strings.ToLower(resourceGroup), vmssCacheSeparator, name)
}

func extractVmssCacheKey(key string) (string, string, error) {
// key is composed of <resourceGroup>#<vmName>
keyItems := strings.Split(key, vmssCacheSeparator)
if len(keyItems) != 2 {
return "", "", fmt.Errorf("key %q is not in format '<resourceGroup>#<vmName>'", key)
}

resourceGroup := keyItems[0]
vmName := keyItems[1]
return resourceGroup, vmName, nil
}

func (ss *scaleSet) newVmssVMCache() (*timedCache, error) {
getter := func(key string) (interface{}, error) {
// key is composed of <resourceGroup>#<vmName>
resourceGroup, vmName, err := extractVmssCacheKey(key)
if err != nil {
return nil, err
}

// vmName's format is 'scaleSetName_instanceID'
ssName, instanceID, err := extractVmssVMName(vmName)
if err != nil {
return nil, err
}

// Not found, the VM doesn't belong to any known scale sets.
if ssName == "" {
return nil, nil
}

ctx, cancel := getContextWithCancel()
defer cancel()
result, err := ss.VirtualMachineScaleSetVMsClient.Get(ctx, resourceGroup, ssName, instanceID)
exists, message, realErr := checkResourceExistsFromError(err)
if realErr != nil {
return nil, realErr
}

if !exists {
klog.V(2).Infof("Virtual machine scale set VM %q not found with message: %q", key, message)
return nil, nil
}

// Get instanceView for vmssVM.
if result.InstanceView == nil {
viewCtx, viewCancel := getContextWithCancel()
defer viewCancel()
view, err := ss.VirtualMachineScaleSetVMsClient.GetInstanceView(viewCtx, resourceGroup, ssName, instanceID)
// It is possible that the vmssVM gets removed just before this call. So check whether the VM exist again.
exists, message, realErr = checkResourceExistsFromError(err)
if realErr != nil {
return nil, realErr
}
if !exists {
klog.V(2).Infof("Virtual machine scale set VM %q not found with message: %q", key, message)
return nil, nil
}

result.InstanceView = &view
}

return &result, nil
}

return newTimedcache(vmssVMCacheTTL, getter)
}

func (ss *scaleSet) getScaleSetNameByNodeName(nodeName string) (string, error) {
getScaleSetName := func(nodeName string) (string, error) {
nodeNameMapping, err := ss.nodeNameToScaleSetMappingCache.Get(nodeNameToScaleSetMappingKey)
if err != nil {
return "", err
}

realMapping := nodeNameMapping.(nodeNameToScaleSetMapping)
if ssName, ok := realMapping[nodeName]; ok {
return ssName, nil
}

return "", nil
}

ssName, err := getScaleSetName(nodeName)
if err != nil {
return "", err
}

if ssName != "" {
return ssName, nil
}

// ssName is still not found, it is likely that new Nodes are created.
// Force refresh the cache and try again.
ss.nodeNameToScaleSetMappingCache.Delete(nodeNameToScaleSetMappingKey)
return getScaleSetName(nodeName)
}

func (ss *scaleSet) isNodeManagedByAvailabilitySet(nodeName string) (bool, error) {
cached, err := ss.availabilitySetNodesCache.Get(availabilitySetNodesKey)
if err != nil {
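
newVMSSVirtualMachinesCache above builds a single sync.Map keyed by the lowercased computer name, and deleteCacheForNode evicts one node from it after an update. A compact sketch of that layout (entry and buildCache are illustrative stand-ins for vmssVirtualMachinesEntry and the timedCache getter):

package main

import (
	"fmt"
	"strings"
	"sync"
)

// entry is a stand-in for vmssVirtualMachinesEntry in this sketch.
type entry struct {
	resourceGroup string
	vmssName      string
	instanceID    string
}

// buildCache mirrors the getter above: one sync.Map for all scale sets,
// keyed by the lowercased computer name (the Kubernetes node name).
func buildCache(vms map[string]entry) *sync.Map {
	m := &sync.Map{}
	for computerName, e := range vms {
		e := e
		m.Store(strings.ToLower(computerName), &e)
	}
	return m
}

func main() {
	cache := buildCache(map[string]entry{
		"Node-1": {resourceGroup: "rg", vmssName: "vmss-a", instanceID: "0"},
	})
	if v, ok := cache.Load("node-1"); ok {
		fmt.Printf("%+v\n", v)
	}
	// deleteCacheForNode equivalent: evict a single node after an update.
	cache.Delete("node-1")
	_, ok := cache.Load("node-1")
	fmt.Println("still cached:", ok)
}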
2
vendor/k8s.io/legacy-cloud-providers/openstack/util/mount/mount_linux.go
generated
vendored
@ -1132,7 +1132,7 @@ func doSafeMakeDir(pathname string, base string, perm os.FileMode) error {
return fmt.Errorf("cannot create directory %s: %s", currentPath, err)
}
// Dive into the created directory
childFD, err := syscall.Openat(parentFD, dir, nofollowFlags, 0)
childFD, err = syscall.Openat(parentFD, dir, nofollowFlags, 0)
if err != nil {
return fmt.Errorf("cannot open %s: %s", currentPath, err)
}
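
The one-character change from := to = matters here: inside the loop, := would declare a new childFD scoped to the loop body and leave the outer descriptor untouched for the code that follows. A small sketch of the same pitfall with hypothetical names (open simply simulates syscall.Openat):

package main

import "fmt"

func open(path string) (int, error) { return len(path), nil }

func main() {
	parentFD := 0
	childFD := -1
	var err error
	for _, dir := range []string{"a", "ab", "abc"} {
		// With ":=" here, childFD would be a new variable scoped to the loop
		// body and the outer childFD would stay -1; "=" updates the outer
		// variable, which is what the vendored fix restores.
		childFD, err = open(fmt.Sprintf("%d/%s", parentFD, dir))
		if err != nil {
			return
		}
		parentFD = childFD
	}
	fmt.Println("final fd:", childFD)
}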
44
vendor/modules.txt
vendored
@ -1084,9 +1084,9 @@ gopkg.in/square/go-jose.v2/cipher
gopkg.in/square/go-jose.v2/json
# gopkg.in/warnings.v0 v0.1.1
gopkg.in/warnings.v0
# gopkg.in/yaml.v2 v2.2.2
# gopkg.in/yaml.v2 v2.2.4
gopkg.in/yaml.v2
# k8s.io/api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.16.0-k3s.1
# k8s.io/api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.16.2-k3s.1
k8s.io/api/core/v1
k8s.io/api/apps/v1
k8s.io/api/admissionregistration/v1
@ -1128,7 +1128,7 @@ k8s.io/api/storage/v1beta1
k8s.io/api/admission/v1
k8s.io/api/admission/v1beta1
k8s.io/api/imagepolicy/v1alpha1
# k8s.io/apiextensions-apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.16.0-k3s.1
# k8s.io/apiextensions-apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.16.2-k3s.1
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1
k8s.io/apiextensions-apiserver/pkg/apiserver
@ -1176,7 +1176,7 @@ k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensio
k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1
k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1
k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1
# k8s.io/apimachinery v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.16.0-k3s.1
# k8s.io/apimachinery v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.16.2-k3s.1
k8s.io/apimachinery/pkg/util/json
k8s.io/apimachinery/pkg/util/net
k8s.io/apimachinery/pkg/apis/meta/v1
@ -1238,7 +1238,7 @@ k8s.io/apimachinery/pkg/api/meta/table
k8s.io/apimachinery/pkg/apis/meta/v1beta1/validation
k8s.io/apimachinery/pkg/runtime/serializer/yaml
k8s.io/apimachinery/pkg/util/duration
# k8s.io/apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.16.0-k3s.1
# k8s.io/apiserver v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.16.2-k3s.1
k8s.io/apiserver/pkg/authentication/authenticator
k8s.io/apiserver/pkg/endpoints/request
k8s.io/apiserver/pkg/server
@ -1351,7 +1351,7 @@ k8s.io/apiserver/pkg/registry/generic/rest
k8s.io/apiserver/pkg/storage/value/encrypt/envelope/v1beta1
k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission
k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1
# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.16.0-k3s.1
# k8s.io/cli-runtime v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.16.2-k3s.1
k8s.io/cli-runtime/pkg/genericclioptions
k8s.io/cli-runtime/pkg/printers
k8s.io/cli-runtime/pkg/resource
@ -1364,7 +1364,7 @@ k8s.io/cli-runtime/pkg/kustomize/k8sdeps/configmapandsecret
k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/hash
k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch
k8s.io/cli-runtime/pkg/kustomize/k8sdeps/kv
# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.16.0-k3s.1
# k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.16.2-k3s.1
k8s.io/client-go/tools/clientcmd
k8s.io/client-go/util/cert
k8s.io/client-go/kubernetes
@ -1550,20 +1550,20 @@ k8s.io/client-go/listers/node/v1alpha1
k8s.io/client-go/listers/scheduling/v1alpha1
k8s.io/client-go/listers/scheduling/v1beta1
k8s.io/client-go/listers/storage/v1alpha1
# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.16.0-k3s.1
# k8s.io/cloud-provider v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.16.2-k3s.1
k8s.io/cloud-provider
k8s.io/cloud-provider/volume/helpers
k8s.io/cloud-provider/volume
k8s.io/cloud-provider/service/helpers
k8s.io/cloud-provider/volume/errors
k8s.io/cloud-provider/node/helpers
# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.16.0-k3s.1
# k8s.io/cluster-bootstrap v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.16.2-k3s.1
k8s.io/cluster-bootstrap/token/api
k8s.io/cluster-bootstrap/util/secrets
k8s.io/cluster-bootstrap/util/tokens
k8s.io/cluster-bootstrap/token/jws
k8s.io/cluster-bootstrap/token/util
# k8s.io/code-generator v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.16.0-k3s.1
# k8s.io/code-generator v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.16.2-k3s.1
k8s.io/code-generator/cmd/client-gen/args
k8s.io/code-generator/cmd/client-gen/generators
k8s.io/code-generator/cmd/client-gen/types
@ -1578,7 +1578,7 @@ k8s.io/code-generator/pkg/util
k8s.io/code-generator/cmd/client-gen/generators/fake
k8s.io/code-generator/cmd/client-gen/generators/scheme
k8s.io/code-generator/pkg/namer
# k8s.io/component-base v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.16.0-k3s.1
# k8s.io/component-base v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.16.2-k3s.1
k8s.io/component-base/logs
k8s.io/component-base/cli/flag
k8s.io/component-base/metrics/prometheus/restclient
@ -1591,10 +1591,10 @@ k8s.io/component-base/metrics
k8s.io/component-base/featuregate
k8s.io/component-base/config/v1alpha1
k8s.io/component-base/config/validation
# k8s.io/cri-api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.16.0-k3s.1
# k8s.io/cri-api v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.16.2-k3s.1
k8s.io/cri-api/pkg/apis/runtime/v1alpha2
k8s.io/cri-api/pkg/apis
# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.16.0-k3s.1
# k8s.io/csi-translation-lib v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.16.2-k3s.1
k8s.io/csi-translation-lib/plugins
k8s.io/csi-translation-lib
# k8s.io/gengo v0.0.0-20190822140433-26a664648505
@ -1609,7 +1609,7 @@ k8s.io/gengo/examples/set-gen/sets
k8s.io/heapster/metrics/api/v1/types
# k8s.io/klog v0.4.0
k8s.io/klog
# k8s.io/kube-aggregator v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.16.0-k3s.1
# k8s.io/kube-aggregator v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.16.2-k3s.1
k8s.io/kube-aggregator/pkg/apis/apiregistration/v1
k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/helper
k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1
@ -1637,7 +1637,7 @@ k8s.io/kube-aggregator/pkg/client/informers/externalversions/apiregistration/v1b
k8s.io/kube-aggregator/pkg/registry/apiservice
k8s.io/kube-aggregator/pkg/client/listers/apiregistration/v1beta1
k8s.io/kube-aggregator/pkg/apis/apiregistration/validation
# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.16.0-k3s.1
# k8s.io/kube-controller-manager v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.16.2-k3s.1
k8s.io/kube-controller-manager/config/v1alpha1
# k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf
k8s.io/kube-openapi/pkg/builder
@ -1648,11 +1648,11 @@ k8s.io/kube-openapi/pkg/util/proto
k8s.io/kube-openapi/pkg/aggregator
k8s.io/kube-openapi/pkg/util/proto/validation
k8s.io/kube-openapi/pkg/schemaconv
# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.16.0-k3s.1
# k8s.io/kube-proxy v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.16.2-k3s.1
k8s.io/kube-proxy/config/v1alpha1
# k8s.io/kube-scheduler v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.16.0-k3s.1
# k8s.io/kube-scheduler v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.16.2-k3s.1
k8s.io/kube-scheduler/config/v1alpha1
# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.16.0-k3s.1
# k8s.io/kubectl v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.16.2-k3s.1
k8s.io/kubectl/pkg/util/term
k8s.io/kubectl/pkg/cmd
k8s.io/kubectl/pkg/cmd/annotate
@ -1729,9 +1729,9 @@ k8s.io/kubectl/pkg/util/fieldpath
k8s.io/kubectl/pkg/util/qos
k8s.io/kubectl/pkg/util/resource
k8s.io/kubectl/pkg/util/storage
# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.16.0-k3s.1
# k8s.io/kubelet v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.16.2-k3s.1
k8s.io/kubelet/config/v1beta1
# k8s.io/kubernetes v1.16.0 => github.com/rancher/kubernetes v1.16.0-k3s.1
# k8s.io/kubernetes v1.16.0 => github.com/rancher/kubernetes v1.16.2-k3s.1
k8s.io/kubernetes/cmd/hyperkube
k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1
k8s.io/kubernetes/pkg/kubelet/util
@ -2447,7 +2447,7 @@ k8s.io/kubernetes/pkg/controller/volume/persistentvolume/config/v1alpha1
k8s.io/kubernetes/pkg/apis/abac/v1beta1
k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear
k8s.io/kubernetes/pkg/util/maps
# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.16.0-k3s.1
# k8s.io/legacy-cloud-providers v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.16.2-k3s.1
k8s.io/legacy-cloud-providers/aws
k8s.io/legacy-cloud-providers/azure
k8s.io/legacy-cloud-providers/gce
@ -2457,7 +2457,7 @@ k8s.io/legacy-cloud-providers/vsphere/vclib
k8s.io/legacy-cloud-providers/azure/auth
k8s.io/legacy-cloud-providers/openstack/util/mount
k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers
# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.16.0-k3s.1
# k8s.io/metrics v0.0.0 => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.16.2-k3s.1
k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1
k8s.io/metrics/pkg/client/custom_metrics
k8s.io/metrics/pkg/client/external_metrics