From 82ba4f077e01bf5be20cfd9cbe74949f8eea611d Mon Sep 17 00:00:00 2001 From: Kevin Date: Wed, 4 May 2016 06:50:31 +0000 Subject: [PATCH] implement inter pod topological affinity and anti-affinity --- cmd/integration/integration.go | 2 +- docs/admin/kube-scheduler.md | 4 +- examples/examples_test.go | 1 - hack/verify-flags/known-flags.txt | 2 + pkg/api/deep_copy_generated.go | 103 + pkg/api/types.generated.go | 1415 ++++++++++++- pkg/api/types.go | 112 +- pkg/api/unversioned/validation/validation.go | 34 +- .../unversioned/validation/validation_test.go | 85 + pkg/api/unversioned/well_known_labels.go | 13 +- pkg/api/v1/conversion_generated.go | 264 +++ pkg/api/v1/deep_copy_generated.go | 103 + pkg/api/v1/generated.pb.go | 1757 ++++++++++++----- pkg/api/v1/generated.proto | 109 +- pkg/api/v1/types.generated.go | 1415 ++++++++++++- pkg/api/v1/types.go | 102 +- pkg/api/v1/types_swagger_doc_generated.go | 47 +- pkg/api/validation/validation.go | 130 +- pkg/api/validation/validation_test.go | 468 ++++- pkg/apis/apps/validation/validation.go | 2 +- .../componentconfig/deep_copy_generated.go | 2 + pkg/apis/componentconfig/types.generated.go | 270 ++- pkg/apis/componentconfig/types.go | 6 + .../v1alpha1/conversion_generated.go | 4 + .../v1alpha1/deep_copy_generated.go | 2 + pkg/apis/componentconfig/v1alpha1/defaults.go | 6 + pkg/apis/componentconfig/v1alpha1/types.go | 6 + .../cmd/kube-scheduler/app/options/options.go | 4 + plugin/cmd/kube-scheduler/app/server.go | 2 +- .../scheduler/algorithm/predicates/error.go | 1 + .../algorithm/predicates/predicates.go | 201 +- .../algorithm/predicates/predicates_test.go | 672 +++++++ .../algorithm/priorities/interpod_affinity.go | 216 ++ .../priorities/interpod_affinity_test.go | 688 +++++++ .../algorithm/priorities/util/non_zero.go | 87 +- .../defaults/compatibility_test.go | 3 +- .../algorithmprovider/defaults/defaults.go | 26 +- plugin/pkg/scheduler/factory/factory.go | 46 +- plugin/pkg/scheduler/factory/factory_test.go | 72 +- plugin/pkg/scheduler/factory/plugins.go | 18 +- test/component/scheduler/perf/util.go | 2 +- .../pod-with-node-affinity.yaml | 0 .../node-selection/pod-with-pod-affinity.yaml | 37 + test/e2e/scheduler_predicates.go | 557 +++++- test/integration/extender_test.go | 2 +- test/integration/scheduler_test.go | 8 +- 46 files changed, 8297 insertions(+), 809 deletions(-) create mode 100644 pkg/api/unversioned/validation/validation_test.go create mode 100644 plugin/pkg/scheduler/algorithm/priorities/interpod_affinity.go create mode 100644 plugin/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go rename {docs/user-guide => test/e2e}/node-selection/pod-with-node-affinity.yaml (100%) create mode 100644 test/e2e/node-selection/pod-with-pod-affinity.yaml diff --git a/cmd/integration/integration.go b/cmd/integration/integration.go index da331d1af580a..9d6d212614c7b 100644 --- a/cmd/integration/integration.go +++ b/cmd/integration/integration.go @@ -183,7 +183,7 @@ func startComponents(firstManifestURL, secondManifestURL string) (string, string handler.delegate = m.Handler // Scheduler - schedulerConfigFactory := factory.NewConfigFactory(cl, api.DefaultSchedulerName) + schedulerConfigFactory := factory.NewConfigFactory(cl, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains) schedulerConfig, err := schedulerConfigFactory.Create() if err != nil { glog.Fatalf("Couldn't create scheduler config: %v", err) diff --git a/docs/admin/kube-scheduler.md b/docs/admin/kube-scheduler.md index 
c11f80179a48f..892a8a1f70ac5 100644 --- a/docs/admin/kube-scheduler.md +++ b/docs/admin/kube-scheduler.md @@ -56,7 +56,9 @@ kube-scheduler ``` --address="0.0.0.0": The IP address to serve on (set to 0.0.0.0 for all interfaces) --algorithm-provider="DefaultProvider": The scheduling algorithm provider to use, one of: DefaultProvider + --failure-domains="kubernetes.io/hostname,failure-domain.beta.kubernetes.io/zone,failure-domain.beta.kubernetes.io/region": Indicate the "all topologies" set for an empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity. --google-json-key="": The Google Cloud Platform Service Account JSON Key to use for authentication. + --hard-pod-affinity-symmetric-weight=1: RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule corresponding to every RequiredDuringScheduling affinity rule. --hard-pod-affinity-symmetric-weight represents the weight of implicit PreferredDuringScheduling affinity rule. --kube-api-burst=100: Burst to use while talking with kubernetes apiserver --kube-api-content-type="": ContentType of requests sent to apiserver. Passing application/vnd.kubernetes.protobuf is an experimental feature now. --kube-api-qps=50: QPS to use while talking with kubernetes apiserver @@ -73,7 +75,7 @@ kube-scheduler --scheduler-name="default-scheduler": Name of the scheduler, used to select which pods will be processed by this scheduler, based on pod's annotation with key 'scheduler.alpha.kubernetes.io/name' ``` -###### Auto generated by spf13/cobra on 21-Apr-2016 +###### Auto generated by spf13/cobra on 5-May-2016 diff --git a/examples/examples_test.go b/examples/examples_test.go index 5dc489b14ffbe..2da74d9f88dbe 100644 --- a/examples/examples_test.go +++ b/examples/examples_test.go @@ -333,7 +333,6 @@ func TestExampleObjectSchemas(t *testing.T) { }, "../docs/user-guide/node-selection": { "pod": &api.Pod{}, - "pod-with-node-affinity": &api.Pod{}, }, "../examples/openshift-origin": { "openshift-origin-namespace": &api.Namespace{}, diff --git a/hack/verify-flags/known-flags.txt b/hack/verify-flags/known-flags.txt index 9cb1530c230ad..e19caed43e57f 100644 --- a/hack/verify-flags/known-flags.txt +++ b/hack/verify-flags/known-flags.txt @@ -129,6 +129,7 @@ experimental-prefix external-hostname external-ip failover-timeout +failure-domains fake-clientset file-check-frequency file-suffix @@ -153,6 +154,7 @@ google-json-key grace-period ha-domain hairpin-mode +hard-pod-affinity-symmetric-weight healthz-bind-address healthz-port horizontal-pod-autoscaler-sync-period diff --git a/pkg/api/deep_copy_generated.go b/pkg/api/deep_copy_generated.go index d898d1f0c5650..7dc3cf37171e3 100644 --- a/pkg/api/deep_copy_generated.go +++ b/pkg/api/deep_copy_generated.go @@ -129,6 +129,9 @@ func init() { DeepCopy_api_PersistentVolumeSpec, DeepCopy_api_PersistentVolumeStatus, DeepCopy_api_Pod, + DeepCopy_api_PodAffinity, + DeepCopy_api_PodAffinityTerm, + DeepCopy_api_PodAntiAffinity, DeepCopy_api_PodAttachOptions, DeepCopy_api_PodCondition, DeepCopy_api_PodExecOptions, @@ -175,6 +178,7 @@ func init() { DeepCopy_api_Volume, DeepCopy_api_VolumeMount, DeepCopy_api_VolumeSource, + DeepCopy_api_WeightedPodAffinityTerm, ); err != nil { // if one of the deep copy functions is malformed, detect it immediately. 
panic(err) @@ -199,6 +203,24 @@ func DeepCopy_api_Affinity(in Affinity, out *Affinity, c *conversion.Cloner) err } else { out.NodeAffinity = nil } + if in.PodAffinity != nil { + in, out := in.PodAffinity, &out.PodAffinity + *out = new(PodAffinity) + if err := DeepCopy_api_PodAffinity(*in, *out, c); err != nil { + return err + } + } else { + out.PodAffinity = nil + } + if in.PodAntiAffinity != nil { + in, out := in.PodAntiAffinity, &out.PodAntiAffinity + *out = new(PodAntiAffinity) + if err := DeepCopy_api_PodAntiAffinity(*in, *out, c); err != nil { + return err + } + } else { + out.PodAntiAffinity = nil + } return nil } @@ -1964,6 +1986,79 @@ func DeepCopy_api_Pod(in Pod, out *Pod, c *conversion.Cloner) error { return nil } +func DeepCopy_api_PodAffinity(in PodAffinity, out *PodAffinity, c *conversion.Cloner) error { + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = make([]PodAffinityTerm, len(in)) + for i := range in { + if err := DeepCopy_api_PodAffinityTerm(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.RequiredDuringSchedulingIgnoredDuringExecution = nil + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]WeightedPodAffinityTerm, len(in)) + for i := range in { + if err := DeepCopy_api_WeightedPodAffinityTerm(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.PreferredDuringSchedulingIgnoredDuringExecution = nil + } + return nil +} + +func DeepCopy_api_PodAffinityTerm(in PodAffinityTerm, out *PodAffinityTerm, c *conversion.Cloner) error { + if in.LabelSelector != nil { + in, out := in.LabelSelector, &out.LabelSelector + *out = new(unversioned.LabelSelector) + if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + return err + } + } else { + out.LabelSelector = nil + } + if in.Namespaces != nil { + in, out := in.Namespaces, &out.Namespaces + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.Namespaces = nil + } + out.TopologyKey = in.TopologyKey + return nil +} + +func DeepCopy_api_PodAntiAffinity(in PodAntiAffinity, out *PodAntiAffinity, c *conversion.Cloner) error { + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = make([]PodAffinityTerm, len(in)) + for i := range in { + if err := DeepCopy_api_PodAffinityTerm(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.RequiredDuringSchedulingIgnoredDuringExecution = nil + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]WeightedPodAffinityTerm, len(in)) + for i := range in { + if err := DeepCopy_api_WeightedPodAffinityTerm(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.PreferredDuringSchedulingIgnoredDuringExecution = nil + } + return nil +} + func DeepCopy_api_PodAttachOptions(in PodAttachOptions, out *PodAttachOptions, c *conversion.Cloner) error { if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err @@ -3037,3 +3132,11 @@ func DeepCopy_api_VolumeSource(in VolumeSource, out 
*VolumeSource, c *conversion } return nil } + +func DeepCopy_api_WeightedPodAffinityTerm(in WeightedPodAffinityTerm, out *WeightedPodAffinityTerm, c *conversion.Cloner) error { + out.Weight = in.Weight + if err := DeepCopy_api_PodAffinityTerm(in.PodAffinityTerm, &out.PodAffinityTerm, c); err != nil { + return err + } + return nil +} diff --git a/pkg/api/types.generated.go b/pkg/api/types.generated.go index ee218b13f265c..c832d7ca359fc 100644 --- a/pkg/api/types.generated.go +++ b/pkg/api/types.generated.go @@ -22735,13 +22735,15 @@ func (x *Affinity) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool + var yyq2 [3]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false yyq2[0] = x.NodeAffinity != nil + yyq2[1] = x.PodAffinity != nil + yyq2[2] = x.PodAntiAffinity != nil var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) + r.EncodeArrayStart(3) } else { yynn2 = 0 for _, b := range yyq2 { @@ -22775,6 +22777,52 @@ func (x *Affinity) CodecEncodeSelf(e *codec1978.Encoder) { } } } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.PodAffinity == nil { + r.EncodeNil() + } else { + x.PodAffinity.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podAffinity")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PodAffinity == nil { + r.EncodeNil() + } else { + x.PodAffinity.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.PodAntiAffinity == nil { + r.EncodeNil() + } else { + x.PodAntiAffinity.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podAntiAffinity")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PodAntiAffinity == nil { + r.EncodeNil() + } else { + x.PodAntiAffinity.CodecEncodeSelf(e) + } + } + } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { @@ -22847,6 +22895,28 @@ func (x *Affinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } x.NodeAffinity.CodecDecodeSelf(d) } + case "podAffinity": + if r.TryDecodeAsNil() { + if x.PodAffinity != nil { + x.PodAffinity = nil + } + } else { + if x.PodAffinity == nil { + x.PodAffinity = new(PodAffinity) + } + x.PodAffinity.CodecDecodeSelf(d) + } + case "podAntiAffinity": + if r.TryDecodeAsNil() { + if x.PodAntiAffinity != nil { + x.PodAntiAffinity = nil + } + } else { + if x.PodAntiAffinity == nil { + x.PodAntiAffinity = new(PodAntiAffinity) + } + x.PodAntiAffinity.CodecDecodeSelf(d) + } default: z.DecStructFieldNotFound(-1, yys3) } // end switch yys3 @@ -22858,42 +22928,1105 @@ func (x *Affinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb5 = r.CheckBreak() + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if 
x.NodeAffinity != nil { + x.NodeAffinity = nil + } + } else { + if x.NodeAffinity == nil { + x.NodeAffinity = new(NodeAffinity) + } + x.NodeAffinity.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PodAffinity != nil { + x.PodAffinity = nil + } + } else { + if x.PodAffinity == nil { + x.PodAffinity = new(PodAffinity) + } + x.PodAffinity.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PodAntiAffinity != nil { + x.PodAntiAffinity = nil + } + } else { + if x.PodAntiAffinity == nil { + x.PodAntiAffinity = new(PodAntiAffinity) + } + x.PodAntiAffinity.CodecDecodeSelf(d) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodAffinity) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.RequiredDuringSchedulingIgnoredDuringExecution) != 0 + yyq2[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, 
string("preferredDuringSchedulingIgnoredDuringExecution")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodAffinity) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "requiredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv4 := &x.RequiredDuringSchedulingIgnoredDuringExecution + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv4), d) + } + } + case "preferredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv6 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv6), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv9 := &x.RequiredDuringSchedulingIgnoredDuringExecution + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv9), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = 
yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv11 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv11), d) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodAntiAffinity) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.RequiredDuringSchedulingIgnoredDuringExecution) != 0 + yyq2[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("preferredDuringSchedulingIgnoredDuringExecution")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } 
+ } + } +} + +func (x *PodAntiAffinity) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodAntiAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "requiredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv4 := &x.RequiredDuringSchedulingIgnoredDuringExecution + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv4), d) + } + } + case "preferredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv6 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv6), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodAntiAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv9 := &x.RequiredDuringSchedulingIgnoredDuringExecution + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv9), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv11 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv11), d) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } 
else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *WeightedPodAffinityTerm) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Weight)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("weight")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Weight)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.PodAffinityTerm + yy7.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podAffinityTerm")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.PodAffinityTerm + yy9.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *WeightedPodAffinityTerm) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *WeightedPodAffinityTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "weight": + if r.TryDecodeAsNil() { + x.Weight = 0 + } else { + x.Weight = int(r.DecodeInt(codecSelferBitsize1234)) + } + case "podAffinityTerm": + if r.TryDecodeAsNil() { + x.PodAffinityTerm = PodAffinityTerm{} + } else { + yyv5 := 
&x.PodAffinityTerm + yyv5.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *WeightedPodAffinityTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Weight = 0 + } else { + x.Weight = int(r.DecodeInt(codecSelferBitsize1234)) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PodAffinityTerm = PodAffinityTerm{} + } else { + yyv8 := &x.PodAffinityTerm + yyv8.CodecDecodeSelf(d) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodAffinityTerm) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.LabelSelector != nil + yyq2[2] = x.TopologyKey != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.LabelSelector == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(x.LabelSelector) { + } else { + z.EncFallback(x.LabelSelector) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("labelSelector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.LabelSelector == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(x.LabelSelector) { + } else { + z.EncFallback(x.LabelSelector) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Namespaces == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + z.F.EncSliceStringV(x.Namespaces, false, e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("namespaces")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Namespaces == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + z.F.EncSliceStringV(x.Namespaces, false, e) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.TopologyKey)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("topologyKey")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.TopologyKey)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodAffinityTerm) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodAffinityTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "labelSelector": + if r.TryDecodeAsNil() { + if x.LabelSelector != nil { + x.LabelSelector = nil + } + } else { + if x.LabelSelector == nil { + x.LabelSelector = new(pkg2_unversioned.LabelSelector) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(x.LabelSelector) { + } else { + z.DecFallback(x.LabelSelector, false) + } + } + case "namespaces": + if r.TryDecodeAsNil() { + x.Namespaces = nil + } else { + yyv6 := &x.Namespaces + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + z.F.DecSliceStringX(yyv6, false, d) + } + } + case "topologyKey": + if r.TryDecodeAsNil() { + x.TopologyKey = "" + } else { + x.TopologyKey = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodAffinityTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.LabelSelector != nil { + x.LabelSelector = nil 
+ } + } else { + if x.LabelSelector == nil { + x.LabelSelector = new(pkg2_unversioned.LabelSelector) + } + yym11 := z.DecBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.DecExt(x.LabelSelector) { + } else { + z.DecFallback(x.LabelSelector, false) + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() } - if yyb5 { + if yyb9 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - if x.NodeAffinity != nil { - x.NodeAffinity = nil - } + x.Namespaces = nil } else { - if x.NodeAffinity == nil { - x.NodeAffinity = new(NodeAffinity) + yyv12 := &x.Namespaces + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + z.F.DecSliceStringX(yyv12, false, d) } - x.NodeAffinity.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TopologyKey = "" + } else { + x.TopologyKey = string(r.DecodeString()) } for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb5 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb5 { + if yyb9 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") + z.DecStructFieldNotFound(yyj9-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -51595,6 +52728,244 @@ func (x codecSelfer1234) decSliceNodeSelectorRequirement(v *[]NodeSelectorRequir } } +func (x codecSelfer1234) encSlicePodAffinityTerm(v []PodAffinityTerm, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePodAffinityTerm(v *[]PodAffinityTerm, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PodAffinityTerm{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 48) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PodAffinityTerm, yyrl1) + } + } else { + yyv1 = make([]PodAffinityTerm, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodAffinityTerm{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PodAffinityTerm{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodAffinityTerm{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { 
+ + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PodAffinityTerm{}) // var yyz1 PodAffinityTerm + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodAffinityTerm{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PodAffinityTerm{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceWeightedPodAffinityTerm(v []WeightedPodAffinityTerm, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceWeightedPodAffinityTerm(v *[]WeightedPodAffinityTerm, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []WeightedPodAffinityTerm{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]WeightedPodAffinityTerm, yyrl1) + } + } else { + yyv1 = make([]WeightedPodAffinityTerm, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = WeightedPodAffinityTerm{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, WeightedPodAffinityTerm{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = WeightedPodAffinityTerm{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, WeightedPodAffinityTerm{}) // var yyz1 WeightedPodAffinityTerm + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = WeightedPodAffinityTerm{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []WeightedPodAffinityTerm{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + func (x codecSelfer1234) encSlicePreferredSchedulingTerm(v []PreferredSchedulingTerm, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) diff --git a/pkg/api/types.go b/pkg/api/types.go index 31fd337b8cdd1..f06cbcc9bf22b 100644 --- a/pkg/api/types.go +++ b/pkg/api/types.go @@ -1159,11 +1159,109 @@ const ( NodeSelectorOpLt NodeSelectorOperator = "Lt" ) -// Affinity is a group of affinity scheduling rules, currently -// only node affinity, but in the future also 
inter-pod affinity. +// Affinity is a group of affinity scheduling rules. type Affinity struct { // Describes node affinity scheduling rules for the pod. NodeAffinity *NodeAffinity `json:"nodeAffinity,omitempty"` + // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + PodAffinity *PodAffinity `json:"podAffinity,omitempty"` + // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + PodAntiAffinity *PodAntiAffinity `json:"podAntiAffinity,omitempty"` +} + +// Pod affinity is a group of inter pod affinity scheduling rules. +type PodAffinity struct { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system will try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system may or may not try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"` + // The scheduler will prefer to schedule pods to nodes that satisfy + // the affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + // node(s) with the highest sum are the most preferred. + PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"` +} + +// Pod anti affinity is a group of inter pod anti affinity scheduling rules. +type PodAntiAffinity struct { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the anti-affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the anti-affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system will try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. 
all terms must be satisfied. + // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` + // If the anti-affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the anti-affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system may or may not try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"` + // The scheduler will prefer to schedule pods to nodes that satisfy + // the anti-affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling anti-affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + // node(s) with the highest sum are the most preferred. + PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"` +} + +// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) +type WeightedPodAffinityTerm struct { + // weight associated with matching the corresponding podAffinityTerm, + // in the range 1-100. + Weight int `json:"weight"` + // Required. A pod affinity term, associated with the corresponding weight. + PodAffinityTerm PodAffinityTerm `json:"podAffinityTerm"` +} + +// Defines a set of pods (namely those matching the labelSelector +// relative to the given namespace(s)) that this pod should be +// co-located (affinity) or not co-located (anti-affinity) with, +// where co-located is defined as running on a node whose value of +// the label with key matches that of any node on which +// a pod of the set of pods is running. +type PodAffinityTerm struct { + // A label query over a set of resources, in this case pods. + LabelSelector *unversioned.LabelSelector `json:"labelSelector,omitempty"` + // namespaces specifies which namespaces the labelSelector applies to (matches against); + // nil list means "this pod's namespace," empty list means "all namespaces" + // The json tag here is not "omitempty" since we need to distinguish nil and empty. + // See https://golang.org/pkg/encoding/json/#Marshal for more details. + Namespaces []string `json:"namespaces"` + // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + // the labelSelector in the specified namespaces, where co-located is defined as running on a node + // whose value of the label with key topologyKey matches that of any node on which any of the + // selected pods is running. 
+ // For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies" + // ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); + // for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed. + TopologyKey string `json:"topologyKey,omitempty"` } // Node affinity is a group of node affinity scheduling rules. @@ -2604,4 +2702,14 @@ type RangeAllocation struct { const ( // "default-scheduler" is the name of default scheduler. DefaultSchedulerName = "default-scheduler" + + // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule + // corresponding to every RequiredDuringScheduling affinity rule. + // When the --hard-pod-affinity-weight scheduler flag is not specified, + // DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule. + DefaultHardPodAffinitySymmetricWeight int = 1 + + // When the --failure-domains scheduler flag is not specified, + // DefaultFailureDomains defines the set of label keys used when TopologyKey is empty in PreferredDuringScheduling anti-affinity. + DefaultFailureDomains string = unversioned.LabelHostname + "," + unversioned.LabelZoneFailureDomain + "," + unversioned.LabelZoneRegion ) diff --git a/pkg/api/unversioned/validation/validation.go b/pkg/api/unversioned/validation/validation.go index f42f37b048255..e0b4f17560dd7 100644 --- a/pkg/api/unversioned/validation/validation.go +++ b/pkg/api/unversioned/validation/validation.go @@ -17,17 +17,24 @@ limitations under the License. package validation import ( + "fmt" + "k8s.io/kubernetes/pkg/api/unversioned" - apivalidation "k8s.io/kubernetes/pkg/api/validation" + "k8s.io/kubernetes/pkg/util/validation" "k8s.io/kubernetes/pkg/util/validation/field" ) +var ( + labelValueErrorMsg string = fmt.Sprintf(`must have at most %d characters, matching regex %s: e.g. "MyValue" or ""`, validation.LabelValueMaxLength, validation.LabelValueFmt) + qualifiedNameErrorMsg string = fmt.Sprintf(`must be a qualified name (at most %d characters, matching regex %s), with an optional DNS subdomain prefix (at most %d characters, matching regex %s) and slash (/): e.g. "MyName" or "example.com/MyName"`, validation.QualifiedNameMaxLength, validation.QualifiedNameFmt, validation.DNS1123SubdomainMaxLength, validation.DNS1123SubdomainFmt) +) + func ValidateLabelSelector(ps *unversioned.LabelSelector, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if ps == nil { return allErrs } - allErrs = append(allErrs, apivalidation.ValidateLabels(ps.MatchLabels, fldPath.Child("matchLabels"))...) + allErrs = append(allErrs, ValidateLabels(ps.MatchLabels, fldPath.Child("matchLabels"))...) for i, expr := range ps.MatchExpressions { allErrs = append(allErrs, ValidateLabelSelectorRequirement(expr, fldPath.Child("matchExpressions").Index(i))...) } @@ -48,6 +55,27 @@ func ValidateLabelSelectorRequirement(sr unversioned.LabelSelectorRequirement, f default: allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), sr.Operator, "not a valid selector operator")) } - allErrs = append(allErrs, apivalidation.ValidateLabelName(sr.Key, fldPath.Child("key"))...) + allErrs = append(allErrs, ValidateLabelName(sr.Key, fldPath.Child("key"))...) + return allErrs +} + +// ValidateLabelName validates that the label name is correctly defined. 
+func ValidateLabelName(labelName string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if !validation.IsQualifiedName(labelName) { + allErrs = append(allErrs, field.Invalid(fldPath, labelName, qualifiedNameErrorMsg)) + } + return allErrs +} + +// ValidateLabels validates that a set of labels are correctly defined. +func ValidateLabels(labels map[string]string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for k, v := range labels { + allErrs = append(allErrs, ValidateLabelName(k, fldPath)...) + if !validation.IsValidLabelValue(v) { + allErrs = append(allErrs, field.Invalid(fldPath, v, labelValueErrorMsg)) + } + } return allErrs } diff --git a/pkg/api/unversioned/validation/validation_test.go b/pkg/api/unversioned/validation/validation_test.go new file mode 100644 index 0000000000000..26be6fb580219 --- /dev/null +++ b/pkg/api/unversioned/validation/validation_test.go @@ -0,0 +1,85 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "strings" + "testing" + + "k8s.io/kubernetes/pkg/util/validation/field" +) + +func TestValidateLabels(t *testing.T) { + successCases := []map[string]string{ + {"simple": "bar"}, + {"now-with-dashes": "bar"}, + {"1-starts-with-num": "bar"}, + {"1234": "bar"}, + {"simple/simple": "bar"}, + {"now-with-dashes/simple": "bar"}, + {"now-with-dashes/now-with-dashes": "bar"}, + {"now.with.dots/simple": "bar"}, + {"now-with.dashes-and.dots/simple": "bar"}, + {"1-num.2-num/3-num": "bar"}, + {"1234/5678": "bar"}, + {"1.2.3.4/5678": "bar"}, + {"UpperCaseAreOK123": "bar"}, + {"goodvalue": "123_-.BaR"}, + } + for i := range successCases { + errs := ValidateLabels(successCases[i], field.NewPath("field")) + if len(errs) != 0 { + t.Errorf("case[%d] expected success, got %#v", i, errs) + } + } + + labelNameErrorCases := []map[string]string{ + {"nospecialchars^=@": "bar"}, + {"cantendwithadash-": "bar"}, + {"only/one/slash": "bar"}, + {strings.Repeat("a", 254): "bar"}, + } + for i := range labelNameErrorCases { + errs := ValidateLabels(labelNameErrorCases[i], field.NewPath("field")) + if len(errs) != 1 { + t.Errorf("case[%d] expected failure", i) + } else { + detail := errs[0].Detail + if detail != qualifiedNameErrorMsg { + t.Errorf("error detail %s should be equal %s", detail, qualifiedNameErrorMsg) + } + } + } + + labelValueErrorCases := []map[string]string{ + {"toolongvalue": strings.Repeat("a", 64)}, + {"backslashesinvalue": "some\\bad\\value"}, + {"nocommasallowed": "bad,value"}, + {"strangecharsinvalue": "?#$notsogood"}, + } + for i := range labelValueErrorCases { + errs := ValidateLabels(labelValueErrorCases[i], field.NewPath("field")) + if len(errs) != 1 { + t.Errorf("case[%d] expected failure", i) + } else { + detail := errs[0].Detail + if detail != labelValueErrorMsg { + t.Errorf("error detail %s should be equal %s", detail, labelValueErrorMsg) + } + } + } +} diff --git a/pkg/api/unversioned/well_known_labels.go 
b/pkg/api/unversioned/well_known_labels.go index 6c163b784aacc..2472942ab02db 100644 --- a/pkg/api/unversioned/well_known_labels.go +++ b/pkg/api/unversioned/well_known_labels.go @@ -16,7 +16,12 @@ limitations under the License. package unversioned -const LabelHostname = "kubernetes.io/hostname" -const LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone" -const LabelZoneRegion = "failure-domain.beta.kubernetes.io/region" -const LabelInstanceType = "beta.kubernetes.io/instance-type" +const ( + // If you add a new topology domain here, also consider adding it to the set of default values + // for the scheduler's --failure-domain command-line argument. + LabelHostname = "kubernetes.io/hostname" + LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone" + LabelZoneRegion = "failure-domain.beta.kubernetes.io/region" + + LabelInstanceType = "beta.kubernetes.io/instance-type" +) diff --git a/pkg/api/v1/conversion_generated.go b/pkg/api/v1/conversion_generated.go index 20541c27310c1..0007bf008efcf 100644 --- a/pkg/api/v1/conversion_generated.go +++ b/pkg/api/v1/conversion_generated.go @@ -220,6 +220,12 @@ func init() { Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus, Convert_v1_Pod_To_api_Pod, Convert_api_Pod_To_v1_Pod, + Convert_v1_PodAffinity_To_api_PodAffinity, + Convert_api_PodAffinity_To_v1_PodAffinity, + Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm, + Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm, + Convert_v1_PodAntiAffinity_To_api_PodAntiAffinity, + Convert_api_PodAntiAffinity_To_v1_PodAntiAffinity, Convert_v1_PodAttachOptions_To_api_PodAttachOptions, Convert_api_PodAttachOptions_To_v1_PodAttachOptions, Convert_v1_PodCondition_To_api_PodCondition, @@ -312,6 +318,8 @@ func init() { Convert_api_VolumeMount_To_v1_VolumeMount, Convert_v1_VolumeSource_To_api_VolumeSource, Convert_api_VolumeSource_To_v1_VolumeSource, + Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm, + Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm, ); err != nil { // if one of the conversion functions is malformed, detect it immediately. 
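// [Illustrative aside, not part of this patch.] The conversion functions below shuttle the new
// pod (anti-)affinity types between the v1 and internal APIs. For orientation, a pod spec using
// them would populate api.Affinity roughly as follows; the field names come from the types added
// by this patch, while the labels and weight are made-up example values:
affinity := api.Affinity{
	PodAffinity: &api.PodAffinity{
		// Hard requirement: co-locate with pods labeled app=cache in the same zone.
		RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{{
			LabelSelector: &unversioned.LabelSelector{MatchLabels: map[string]string{"app": "cache"}},
			TopologyKey:   unversioned.LabelZoneFailureDomain,
		}},
	},
	PodAntiAffinity: &api.PodAntiAffinity{
		// Soft preference: spread away from other app=web pods on the same host.
		PreferredDuringSchedulingIgnoredDuringExecution: []api.WeightedPodAffinityTerm{{
			Weight: 100,
			PodAffinityTerm: api.PodAffinityTerm{
				LabelSelector: &unversioned.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
				TopologyKey:   unversioned.LabelHostname,
			},
		}},
	},
}
_ = affinity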
panic(err) @@ -361,6 +369,24 @@ func autoConvert_v1_Affinity_To_api_Affinity(in *Affinity, out *api.Affinity, s } else { out.NodeAffinity = nil } + if in.PodAffinity != nil { + in, out := &in.PodAffinity, &out.PodAffinity + *out = new(api.PodAffinity) + if err := Convert_v1_PodAffinity_To_api_PodAffinity(*in, *out, s); err != nil { + return err + } + } else { + out.PodAffinity = nil + } + if in.PodAntiAffinity != nil { + in, out := &in.PodAntiAffinity, &out.PodAntiAffinity + *out = new(api.PodAntiAffinity) + if err := Convert_v1_PodAntiAffinity_To_api_PodAntiAffinity(*in, *out, s); err != nil { + return err + } + } else { + out.PodAntiAffinity = nil + } return nil } @@ -381,6 +407,24 @@ func autoConvert_api_Affinity_To_v1_Affinity(in *api.Affinity, out *Affinity, s } else { out.NodeAffinity = nil } + if in.PodAffinity != nil { + in, out := &in.PodAffinity, &out.PodAffinity + *out = new(PodAffinity) + if err := Convert_api_PodAffinity_To_v1_PodAffinity(*in, *out, s); err != nil { + return err + } + } else { + out.PodAffinity = nil + } + if in.PodAntiAffinity != nil { + in, out := &in.PodAntiAffinity, &out.PodAntiAffinity + *out = new(PodAntiAffinity) + if err := Convert_api_PodAntiAffinity_To_v1_PodAntiAffinity(*in, *out, s); err != nil { + return err + } + } else { + out.PodAntiAffinity = nil + } return nil } @@ -5010,6 +5054,196 @@ func autoConvert_api_Pod_To_v1_Pod(in *api.Pod, out *Pod, s conversion.Scope) er return nil } +func autoConvert_v1_PodAffinity_To_api_PodAffinity(in *PodAffinity, out *api.PodAffinity, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*PodAffinity))(in) + } + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = make([]api.PodAffinityTerm, len(*in)) + for i := range *in { + if err := Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.RequiredDuringSchedulingIgnoredDuringExecution = nil + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]api.WeightedPodAffinityTerm, len(*in)) + for i := range *in { + if err := Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.PreferredDuringSchedulingIgnoredDuringExecution = nil + } + return nil +} + +func Convert_v1_PodAffinity_To_api_PodAffinity(in *PodAffinity, out *api.PodAffinity, s conversion.Scope) error { + return autoConvert_v1_PodAffinity_To_api_PodAffinity(in, out, s) +} + +func autoConvert_api_PodAffinity_To_v1_PodAffinity(in *api.PodAffinity, out *PodAffinity, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*api.PodAffinity))(in) + } + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = make([]PodAffinityTerm, len(*in)) + for i := range *in { + if err := Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.RequiredDuringSchedulingIgnoredDuringExecution = nil + } + if 
in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]WeightedPodAffinityTerm, len(*in)) + for i := range *in { + if err := Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.PreferredDuringSchedulingIgnoredDuringExecution = nil + } + return nil +} + +func Convert_api_PodAffinity_To_v1_PodAffinity(in *api.PodAffinity, out *PodAffinity, s conversion.Scope) error { + return autoConvert_api_PodAffinity_To_v1_PodAffinity(in, out, s) +} + +func autoConvert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in *PodAffinityTerm, out *api.PodAffinityTerm, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*PodAffinityTerm))(in) + } + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(unversioned.LabelSelector) + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(*in, *out, 0); err != nil { + return err + } + } else { + out.LabelSelector = nil + } + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } else { + out.Namespaces = nil + } + out.TopologyKey = in.TopologyKey + return nil +} + +func Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in *PodAffinityTerm, out *api.PodAffinityTerm, s conversion.Scope) error { + return autoConvert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in, out, s) +} + +func autoConvert_api_PodAffinityTerm_To_v1_PodAffinityTerm(in *api.PodAffinityTerm, out *PodAffinityTerm, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*api.PodAffinityTerm))(in) + } + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(unversioned.LabelSelector) + // TODO: Inefficient conversion - can we improve it? 
+ if err := s.Convert(*in, *out, 0); err != nil { + return err + } + } else { + out.LabelSelector = nil + } + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } else { + out.Namespaces = nil + } + out.TopologyKey = in.TopologyKey + return nil +} + +func Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm(in *api.PodAffinityTerm, out *PodAffinityTerm, s conversion.Scope) error { + return autoConvert_api_PodAffinityTerm_To_v1_PodAffinityTerm(in, out, s) +} + +func autoConvert_v1_PodAntiAffinity_To_api_PodAntiAffinity(in *PodAntiAffinity, out *api.PodAntiAffinity, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*PodAntiAffinity))(in) + } + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = make([]api.PodAffinityTerm, len(*in)) + for i := range *in { + if err := Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.RequiredDuringSchedulingIgnoredDuringExecution = nil + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]api.WeightedPodAffinityTerm, len(*in)) + for i := range *in { + if err := Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.PreferredDuringSchedulingIgnoredDuringExecution = nil + } + return nil +} + +func Convert_v1_PodAntiAffinity_To_api_PodAntiAffinity(in *PodAntiAffinity, out *api.PodAntiAffinity, s conversion.Scope) error { + return autoConvert_v1_PodAntiAffinity_To_api_PodAntiAffinity(in, out, s) +} + +func autoConvert_api_PodAntiAffinity_To_v1_PodAntiAffinity(in *api.PodAntiAffinity, out *PodAntiAffinity, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*api.PodAntiAffinity))(in) + } + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = make([]PodAffinityTerm, len(*in)) + for i := range *in { + if err := Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.RequiredDuringSchedulingIgnoredDuringExecution = nil + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]WeightedPodAffinityTerm, len(*in)) + for i := range *in { + if err := Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.PreferredDuringSchedulingIgnoredDuringExecution = nil + } + return nil +} + +func Convert_api_PodAntiAffinity_To_v1_PodAntiAffinity(in *api.PodAntiAffinity, out *PodAntiAffinity, s conversion.Scope) error { + return autoConvert_api_PodAntiAffinity_To_v1_PodAntiAffinity(in, out, s) +} + func autoConvert_v1_PodAttachOptions_To_api_PodAttachOptions(in *PodAttachOptions, out *api.PodAttachOptions, s conversion.Scope) error { if defaulting, found := 
s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*PodAttachOptions))(in) @@ -7523,3 +7757,33 @@ func autoConvert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out * func Convert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *VolumeSource, s conversion.Scope) error { return autoConvert_api_VolumeSource_To_v1_VolumeSource(in, out, s) } + +func autoConvert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(in *WeightedPodAffinityTerm, out *api.WeightedPodAffinityTerm, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*WeightedPodAffinityTerm))(in) + } + out.Weight = int(in.Weight) + if err := Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, s); err != nil { + return err + } + return nil +} + +func Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(in *WeightedPodAffinityTerm, out *api.WeightedPodAffinityTerm, s conversion.Scope) error { + return autoConvert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(in, out, s) +} + +func autoConvert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *api.WeightedPodAffinityTerm, out *WeightedPodAffinityTerm, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*api.WeightedPodAffinityTerm))(in) + } + out.Weight = int32(in.Weight) + if err := Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, s); err != nil { + return err + } + return nil +} + +func Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *api.WeightedPodAffinityTerm, out *WeightedPodAffinityTerm, s conversion.Scope) error { + return autoConvert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in, out, s) +} diff --git a/pkg/api/v1/deep_copy_generated.go b/pkg/api/v1/deep_copy_generated.go index 961b6962f1ecf..5993baefbf72b 100644 --- a/pkg/api/v1/deep_copy_generated.go +++ b/pkg/api/v1/deep_copy_generated.go @@ -126,6 +126,9 @@ func init() { DeepCopy_v1_PersistentVolumeSpec, DeepCopy_v1_PersistentVolumeStatus, DeepCopy_v1_Pod, + DeepCopy_v1_PodAffinity, + DeepCopy_v1_PodAffinityTerm, + DeepCopy_v1_PodAntiAffinity, DeepCopy_v1_PodAttachOptions, DeepCopy_v1_PodCondition, DeepCopy_v1_PodExecOptions, @@ -172,6 +175,7 @@ func init() { DeepCopy_v1_Volume, DeepCopy_v1_VolumeMount, DeepCopy_v1_VolumeSource, + DeepCopy_v1_WeightedPodAffinityTerm, ); err != nil { // if one of the deep copy functions is malformed, detect it immediately. 
panic(err) @@ -196,6 +200,24 @@ func DeepCopy_v1_Affinity(in Affinity, out *Affinity, c *conversion.Cloner) erro } else { out.NodeAffinity = nil } + if in.PodAffinity != nil { + in, out := in.PodAffinity, &out.PodAffinity + *out = new(PodAffinity) + if err := DeepCopy_v1_PodAffinity(*in, *out, c); err != nil { + return err + } + } else { + out.PodAffinity = nil + } + if in.PodAntiAffinity != nil { + in, out := in.PodAntiAffinity, &out.PodAntiAffinity + *out = new(PodAntiAffinity) + if err := DeepCopy_v1_PodAntiAffinity(*in, *out, c); err != nil { + return err + } + } else { + out.PodAntiAffinity = nil + } return nil } @@ -1911,6 +1933,79 @@ func DeepCopy_v1_Pod(in Pod, out *Pod, c *conversion.Cloner) error { return nil } +func DeepCopy_v1_PodAffinity(in PodAffinity, out *PodAffinity, c *conversion.Cloner) error { + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = make([]PodAffinityTerm, len(in)) + for i := range in { + if err := DeepCopy_v1_PodAffinityTerm(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.RequiredDuringSchedulingIgnoredDuringExecution = nil + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]WeightedPodAffinityTerm, len(in)) + for i := range in { + if err := DeepCopy_v1_WeightedPodAffinityTerm(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.PreferredDuringSchedulingIgnoredDuringExecution = nil + } + return nil +} + +func DeepCopy_v1_PodAffinityTerm(in PodAffinityTerm, out *PodAffinityTerm, c *conversion.Cloner) error { + if in.LabelSelector != nil { + in, out := in.LabelSelector, &out.LabelSelector + *out = new(unversioned.LabelSelector) + if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { + return err + } + } else { + out.LabelSelector = nil + } + if in.Namespaces != nil { + in, out := in.Namespaces, &out.Namespaces + *out = make([]string, len(in)) + copy(*out, in) + } else { + out.Namespaces = nil + } + out.TopologyKey = in.TopologyKey + return nil +} + +func DeepCopy_v1_PodAntiAffinity(in PodAntiAffinity, out *PodAntiAffinity, c *conversion.Cloner) error { + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = make([]PodAffinityTerm, len(in)) + for i := range in { + if err := DeepCopy_v1_PodAffinityTerm(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.RequiredDuringSchedulingIgnoredDuringExecution = nil + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]WeightedPodAffinityTerm, len(in)) + for i := range in { + if err := DeepCopy_v1_WeightedPodAffinityTerm(in[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.PreferredDuringSchedulingIgnoredDuringExecution = nil + } + return nil +} + func DeepCopy_v1_PodAttachOptions(in PodAttachOptions, out *PodAttachOptions, c *conversion.Cloner) error { if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err @@ -2998,3 +3093,11 @@ func DeepCopy_v1_VolumeSource(in VolumeSource, out 
*VolumeSource, c *conversion. } return nil } + +func DeepCopy_v1_WeightedPodAffinityTerm(in WeightedPodAffinityTerm, out *WeightedPodAffinityTerm, c *conversion.Cloner) error { + out.Weight = in.Weight + if err := DeepCopy_v1_PodAffinityTerm(in.PodAffinityTerm, &out.PodAffinityTerm, c); err != nil { + return err + } + return nil +} diff --git a/pkg/api/v1/generated.pb.go b/pkg/api/v1/generated.pb.go index d3247795393e2..57d5ca07c4519 100644 --- a/pkg/api/v1/generated.pb.go +++ b/pkg/api/v1/generated.pb.go @@ -119,6 +119,9 @@ limitations under the License. PersistentVolumeSpec PersistentVolumeStatus Pod + PodAffinity + PodAffinityTerm + PodAntiAffinity PodAttachOptions PodCondition PodExecOptions @@ -165,6 +168,7 @@ limitations under the License. Volume VolumeMount VolumeSource + WeightedPodAffinityTerm */ package v1 @@ -561,6 +565,18 @@ func (m *Pod) Reset() { *m = Pod{} } func (m *Pod) String() string { return proto.CompactTextString(m) } func (*Pod) ProtoMessage() {} +func (m *PodAffinity) Reset() { *m = PodAffinity{} } +func (m *PodAffinity) String() string { return proto.CompactTextString(m) } +func (*PodAffinity) ProtoMessage() {} + +func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } +func (m *PodAffinityTerm) String() string { return proto.CompactTextString(m) } +func (*PodAffinityTerm) ProtoMessage() {} + +func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} } +func (m *PodAntiAffinity) String() string { return proto.CompactTextString(m) } +func (*PodAntiAffinity) ProtoMessage() {} + func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } func (m *PodAttachOptions) String() string { return proto.CompactTextString(m) } func (*PodAttachOptions) ProtoMessage() {} @@ -745,6 +761,10 @@ func (m *VolumeSource) Reset() { *m = VolumeSource{} } func (m *VolumeSource) String() string { return proto.CompactTextString(m) } func (*VolumeSource) ProtoMessage() {} +func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } +func (m *WeightedPodAffinityTerm) String() string { return proto.CompactTextString(m) } +func (*WeightedPodAffinityTerm) ProtoMessage() {} + func init() { proto.RegisterType((*AWSElasticBlockStoreVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.AWSElasticBlockStoreVolumeSource") proto.RegisterType((*Affinity)(nil), "k8s.io.kubernetes.pkg.api.v1.Affinity") @@ -840,6 +860,9 @@ func init() { proto.RegisterType((*PersistentVolumeSpec)(nil), "k8s.io.kubernetes.pkg.api.v1.PersistentVolumeSpec") proto.RegisterType((*PersistentVolumeStatus)(nil), "k8s.io.kubernetes.pkg.api.v1.PersistentVolumeStatus") proto.RegisterType((*Pod)(nil), "k8s.io.kubernetes.pkg.api.v1.Pod") + proto.RegisterType((*PodAffinity)(nil), "k8s.io.kubernetes.pkg.api.v1.PodAffinity") + proto.RegisterType((*PodAffinityTerm)(nil), "k8s.io.kubernetes.pkg.api.v1.PodAffinityTerm") + proto.RegisterType((*PodAntiAffinity)(nil), "k8s.io.kubernetes.pkg.api.v1.PodAntiAffinity") proto.RegisterType((*PodAttachOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.PodAttachOptions") proto.RegisterType((*PodCondition)(nil), "k8s.io.kubernetes.pkg.api.v1.PodCondition") proto.RegisterType((*PodExecOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.PodExecOptions") @@ -886,6 +909,7 @@ func init() { proto.RegisterType((*Volume)(nil), "k8s.io.kubernetes.pkg.api.v1.Volume") proto.RegisterType((*VolumeMount)(nil), "k8s.io.kubernetes.pkg.api.v1.VolumeMount") proto.RegisterType((*VolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.VolumeSource") + proto.RegisterType((*WeightedPodAffinityTerm)(nil), 
"k8s.io.kubernetes.pkg.api.v1.WeightedPodAffinityTerm") } func (m *AWSElasticBlockStoreVolumeSource) Marshal() (data []byte, err error) { size := m.Size() @@ -949,6 +973,26 @@ func (m *Affinity) MarshalTo(data []byte) (int, error) { } i += n1 } + if m.PodAffinity != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.PodAffinity.Size())) + n2, err := m.PodAffinity.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if m.PodAntiAffinity != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.PodAntiAffinity.Size())) + n3, err := m.PodAntiAffinity.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + } return i, nil } @@ -1004,19 +1048,19 @@ func (m *Binding) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n2, err := m.ObjectMeta.MarshalTo(data[i:]) + n4, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n2 + i += n4 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Target.Size())) - n3, err := m.Target.MarshalTo(data[i:]) + n5, err := m.Target.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n3 + i += n5 return i, nil } @@ -1114,11 +1158,11 @@ func (m *CephFSVolumeSource) MarshalTo(data []byte) (int, error) { data[i] = 0x2a i++ i = encodeVarintGenerated(data, i, uint64(m.SecretRef.Size())) - n4, err := m.SecretRef.MarshalTo(data[i:]) + n6, err := m.SecretRef.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n4 + i += n6 } data[i] = 0x30 i++ @@ -1217,11 +1261,11 @@ func (m *ComponentStatus) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n5, err := m.ObjectMeta.MarshalTo(data[i:]) + n7, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n5 + i += n7 if len(m.Conditions) > 0 { for _, msg := range m.Conditions { data[i] = 0x12 @@ -1255,11 +1299,11 @@ func (m *ComponentStatusList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(data[i:]) + n8, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n6 + i += n8 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -1293,11 +1337,11 @@ func (m *ConfigMap) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(data[i:]) + n9, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n7 + i += n9 if len(m.Data) > 0 { for k := range m.Data { data[i] = 0x12 @@ -1336,11 +1380,11 @@ func (m *ConfigMapKeySelector) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size())) - n8, err := m.LocalObjectReference.MarshalTo(data[i:]) + n10, err := m.LocalObjectReference.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n8 + i += n10 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(len(m.Key))) @@ -1366,11 +1410,11 @@ func (m *ConfigMapList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n9, err := m.ListMeta.MarshalTo(data[i:]) + n11, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n9 + i += n11 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -1404,11 +1448,11 @@ func (m 
*ConfigMapVolumeSource) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size())) - n10, err := m.LocalObjectReference.MarshalTo(data[i:]) + n12, err := m.LocalObjectReference.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n10 + i += n12 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -1508,11 +1552,11 @@ func (m *Container) MarshalTo(data []byte) (int, error) { data[i] = 0x42 i++ i = encodeVarintGenerated(data, i, uint64(m.Resources.Size())) - n11, err := m.Resources.MarshalTo(data[i:]) + n13, err := m.Resources.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n11 + i += n13 if len(m.VolumeMounts) > 0 { for _, msg := range m.VolumeMounts { data[i] = 0x4a @@ -1529,31 +1573,31 @@ func (m *Container) MarshalTo(data []byte) (int, error) { data[i] = 0x52 i++ i = encodeVarintGenerated(data, i, uint64(m.LivenessProbe.Size())) - n12, err := m.LivenessProbe.MarshalTo(data[i:]) + n14, err := m.LivenessProbe.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n12 + i += n14 } if m.ReadinessProbe != nil { data[i] = 0x5a i++ i = encodeVarintGenerated(data, i, uint64(m.ReadinessProbe.Size())) - n13, err := m.ReadinessProbe.MarshalTo(data[i:]) + n15, err := m.ReadinessProbe.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n13 + i += n15 } if m.Lifecycle != nil { data[i] = 0x62 i++ i = encodeVarintGenerated(data, i, uint64(m.Lifecycle.Size())) - n14, err := m.Lifecycle.MarshalTo(data[i:]) + n16, err := m.Lifecycle.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n14 + i += n16 } data[i] = 0x6a i++ @@ -1567,11 +1611,11 @@ func (m *Container) MarshalTo(data []byte) (int, error) { data[i] = 0x7a i++ i = encodeVarintGenerated(data, i, uint64(m.SecurityContext.Size())) - n15, err := m.SecurityContext.MarshalTo(data[i:]) + n17, err := m.SecurityContext.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n15 + i += n17 } data[i] = 0x80 i++ @@ -1697,31 +1741,31 @@ func (m *ContainerState) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.Waiting.Size())) - n16, err := m.Waiting.MarshalTo(data[i:]) + n18, err := m.Waiting.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n16 + i += n18 } if m.Running != nil { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Running.Size())) - n17, err := m.Running.MarshalTo(data[i:]) + n19, err := m.Running.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n17 + i += n19 } if m.Terminated != nil { data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.Terminated.Size())) - n18, err := m.Terminated.MarshalTo(data[i:]) + n20, err := m.Terminated.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n18 + i += n20 } return i, nil } @@ -1744,11 +1788,11 @@ func (m *ContainerStateRunning) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.StartedAt.Size())) - n19, err := m.StartedAt.MarshalTo(data[i:]) + n21, err := m.StartedAt.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n19 + i += n21 return i, nil } @@ -1784,19 +1828,19 @@ func (m *ContainerStateTerminated) MarshalTo(data []byte) (int, error) { data[i] = 0x2a i++ i = encodeVarintGenerated(data, i, uint64(m.StartedAt.Size())) - n20, err := m.StartedAt.MarshalTo(data[i:]) + n22, err := m.StartedAt.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n20 + i += n22 data[i] = 0x32 i++ i = encodeVarintGenerated(data, i, 
uint64(m.FinishedAt.Size())) - n21, err := m.FinishedAt.MarshalTo(data[i:]) + n23, err := m.FinishedAt.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n21 + i += n23 data[i] = 0x3a i++ i = encodeVarintGenerated(data, i, uint64(len(m.ContainerID))) @@ -1852,19 +1896,19 @@ func (m *ContainerStatus) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.State.Size())) - n22, err := m.State.MarshalTo(data[i:]) + n24, err := m.State.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n22 + i += n24 data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.LastTerminationState.Size())) - n23, err := m.LastTerminationState.MarshalTo(data[i:]) + n25, err := m.LastTerminationState.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n23 + i += n25 data[i] = 0x20 i++ if m.Ready { @@ -1936,11 +1980,11 @@ func (m *DeleteOptions) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Preconditions.Size())) - n24, err := m.Preconditions.MarshalTo(data[i:]) + n26, err := m.Preconditions.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n24 + i += n26 } if m.OrphanDependents != nil { data[i] = 0x18 @@ -1977,11 +2021,11 @@ func (m *DownwardAPIVolumeFile) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.FieldRef.Size())) - n25, err := m.FieldRef.MarshalTo(data[i:]) + n27, err := m.FieldRef.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n25 + i += n27 return i, nil } @@ -2060,11 +2104,11 @@ func (m *EndpointAddress) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.TargetRef.Size())) - n26, err := m.TargetRef.MarshalTo(data[i:]) + n28, err := m.TargetRef.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n26 + i += n28 } data[i] = 0x1a i++ @@ -2174,11 +2218,11 @@ func (m *Endpoints) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n27, err := m.ObjectMeta.MarshalTo(data[i:]) + n29, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n27 + i += n29 if len(m.Subsets) > 0 { for _, msg := range m.Subsets { data[i] = 0x12 @@ -2212,11 +2256,11 @@ func (m *EndpointsList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n28, err := m.ListMeta.MarshalTo(data[i:]) + n30, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n28 + i += n30 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -2259,11 +2303,11 @@ func (m *EnvVar) MarshalTo(data []byte) (int, error) { data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.ValueFrom.Size())) - n29, err := m.ValueFrom.MarshalTo(data[i:]) + n31, err := m.ValueFrom.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n29 + i += n31 } return i, nil } @@ -2287,31 +2331,31 @@ func (m *EnvVarSource) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.FieldRef.Size())) - n30, err := m.FieldRef.MarshalTo(data[i:]) + n32, err := m.FieldRef.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n30 + i += n32 } if m.ConfigMapKeyRef != nil { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.ConfigMapKeyRef.Size())) - n31, err := m.ConfigMapKeyRef.MarshalTo(data[i:]) + n33, err := m.ConfigMapKeyRef.MarshalTo(data[i:]) if err != nil { return 0, err } - 
i += n31 + i += n33 } if m.SecretKeyRef != nil { data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.SecretKeyRef.Size())) - n32, err := m.SecretKeyRef.MarshalTo(data[i:]) + n34, err := m.SecretKeyRef.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n32 + i += n34 } return i, nil } @@ -2334,19 +2378,19 @@ func (m *Event) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n33, err := m.ObjectMeta.MarshalTo(data[i:]) + n35, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n33 + i += n35 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.InvolvedObject.Size())) - n34, err := m.InvolvedObject.MarshalTo(data[i:]) + n36, err := m.InvolvedObject.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n34 + i += n36 data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) @@ -2358,27 +2402,27 @@ func (m *Event) MarshalTo(data []byte) (int, error) { data[i] = 0x2a i++ i = encodeVarintGenerated(data, i, uint64(m.Source.Size())) - n35, err := m.Source.MarshalTo(data[i:]) + n37, err := m.Source.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n35 + i += n37 data[i] = 0x32 i++ i = encodeVarintGenerated(data, i, uint64(m.FirstTimestamp.Size())) - n36, err := m.FirstTimestamp.MarshalTo(data[i:]) + n38, err := m.FirstTimestamp.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n36 + i += n38 data[i] = 0x3a i++ i = encodeVarintGenerated(data, i, uint64(m.LastTimestamp.Size())) - n37, err := m.LastTimestamp.MarshalTo(data[i:]) + n39, err := m.LastTimestamp.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n37 + i += n39 data[i] = 0x40 i++ i = encodeVarintGenerated(data, i, uint64(m.Count)) @@ -2407,11 +2451,11 @@ func (m *EventList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n38, err := m.ListMeta.MarshalTo(data[i:]) + n40, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n38 + i += n40 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -2597,11 +2641,11 @@ func (m *FlexVolumeSource) MarshalTo(data []byte) (int, error) { data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.SecretRef.Size())) - n39, err := m.SecretRef.MarshalTo(data[i:]) + n41, err := m.SecretRef.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n39 + i += n41 } data[i] = 0x20 i++ @@ -2776,11 +2820,11 @@ func (m *HTTPGetAction) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Port.Size())) - n40, err := m.Port.MarshalTo(data[i:]) + n42, err := m.Port.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n40 + i += n42 data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(len(m.Host))) @@ -2849,31 +2893,31 @@ func (m *Handler) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.Exec.Size())) - n41, err := m.Exec.MarshalTo(data[i:]) + n43, err := m.Exec.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n41 + i += n43 } if m.HTTPGet != nil { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.HTTPGet.Size())) - n42, err := m.HTTPGet.MarshalTo(data[i:]) + n44, err := m.HTTPGet.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n42 + i += n44 } if m.TCPSocket != nil { data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.TCPSocket.Size())) - n43, err := 
m.TCPSocket.MarshalTo(data[i:]) + n45, err := m.TCPSocket.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n43 + i += n45 } return i, nil } @@ -2990,21 +3034,21 @@ func (m *Lifecycle) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.PostStart.Size())) - n44, err := m.PostStart.MarshalTo(data[i:]) + n46, err := m.PostStart.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n44 + i += n46 } if m.PreStop != nil { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.PreStop.Size())) - n45, err := m.PreStop.MarshalTo(data[i:]) + n47, err := m.PreStop.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n45 + i += n47 } return i, nil } @@ -3027,19 +3071,19 @@ func (m *LimitRange) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n46, err := m.ObjectMeta.MarshalTo(data[i:]) + n48, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n46 + i += n48 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n47, err := m.Spec.MarshalTo(data[i:]) + n49, err := m.Spec.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n47 + i += n49 return i, nil } @@ -3077,11 +3121,11 @@ func (m *LimitRangeItem) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n48, err := (&v).MarshalTo(data[i:]) + n50, err := (&v).MarshalTo(data[i:]) if err != nil { return 0, err } - i += n48 + i += n50 } } if len(m.Min) > 0 { @@ -3099,11 +3143,11 @@ func (m *LimitRangeItem) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n49, err := (&v).MarshalTo(data[i:]) + n51, err := (&v).MarshalTo(data[i:]) if err != nil { return 0, err } - i += n49 + i += n51 } } if len(m.Default) > 0 { @@ -3121,11 +3165,11 @@ func (m *LimitRangeItem) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n50, err := (&v).MarshalTo(data[i:]) + n52, err := (&v).MarshalTo(data[i:]) if err != nil { return 0, err } - i += n50 + i += n52 } } if len(m.DefaultRequest) > 0 { @@ -3143,11 +3187,11 @@ func (m *LimitRangeItem) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n51, err := (&v).MarshalTo(data[i:]) + n53, err := (&v).MarshalTo(data[i:]) if err != nil { return 0, err } - i += n51 + i += n53 } } if len(m.MaxLimitRequestRatio) > 0 { @@ -3165,11 +3209,11 @@ func (m *LimitRangeItem) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n52, err := (&v).MarshalTo(data[i:]) + n54, err := (&v).MarshalTo(data[i:]) if err != nil { return 0, err } - i += n52 + i += n54 } } return i, nil @@ -3193,11 +3237,11 @@ func (m *LimitRangeList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n53, err := m.ListMeta.MarshalTo(data[i:]) + n55, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n53 + i += n55 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -3261,11 +3305,11 @@ func (m *List) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n54, err := m.ListMeta.MarshalTo(data[i:]) + n56, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n54 + i 
+= n56 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -3454,27 +3498,27 @@ func (m *Namespace) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n55, err := m.ObjectMeta.MarshalTo(data[i:]) + n57, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n55 + i += n57 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n56, err := m.Spec.MarshalTo(data[i:]) + n58, err := m.Spec.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n56 + i += n58 data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n57, err := m.Status.MarshalTo(data[i:]) + n59, err := m.Status.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n57 + i += n59 return i, nil } @@ -3496,11 +3540,11 @@ func (m *NamespaceList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n58, err := m.ListMeta.MarshalTo(data[i:]) + n60, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n58 + i += n60 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -3589,27 +3633,27 @@ func (m *Node) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n59, err := m.ObjectMeta.MarshalTo(data[i:]) + n61, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n59 + i += n61 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n60, err := m.Spec.MarshalTo(data[i:]) + n62, err := m.Spec.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n60 + i += n62 data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n61, err := m.Status.MarshalTo(data[i:]) + n63, err := m.Status.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n61 + i += n63 return i, nil } @@ -3658,11 +3702,11 @@ func (m *NodeAffinity) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.RequiredDuringSchedulingIgnoredDuringExecution.Size())) - n62, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalTo(data[i:]) + n64, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n62 + i += n64 } if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { @@ -3705,19 +3749,19 @@ func (m *NodeCondition) MarshalTo(data []byte) (int, error) { data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.LastHeartbeatTime.Size())) - n63, err := m.LastHeartbeatTime.MarshalTo(data[i:]) + n65, err := m.LastHeartbeatTime.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n63 + i += n65 data[i] = 0x22 i++ i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) - n64, err := m.LastTransitionTime.MarshalTo(data[i:]) + n66, err := m.LastTransitionTime.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n64 + i += n66 data[i] = 0x2a i++ i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) @@ -3747,11 +3791,11 @@ func (m *NodeDaemonEndpoints) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.KubeletEndpoint.Size())) - n65, err := m.KubeletEndpoint.MarshalTo(data[i:]) + n67, err := m.KubeletEndpoint.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n65 + i += n67 return i, nil } @@ 
-3773,11 +3817,11 @@ func (m *NodeList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n66, err := m.ListMeta.MarshalTo(data[i:]) + n68, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n66 + i += n68 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -3984,11 +4028,11 @@ func (m *NodeStatus) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n67, err := (&v).MarshalTo(data[i:]) + n69, err := (&v).MarshalTo(data[i:]) if err != nil { return 0, err } - i += n67 + i += n69 } } if len(m.Allocatable) > 0 { @@ -4006,11 +4050,11 @@ func (m *NodeStatus) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n68, err := (&v).MarshalTo(data[i:]) + n70, err := (&v).MarshalTo(data[i:]) if err != nil { return 0, err } - i += n68 + i += n70 } } data[i] = 0x1a @@ -4044,19 +4088,19 @@ func (m *NodeStatus) MarshalTo(data []byte) (int, error) { data[i] = 0x32 i++ i = encodeVarintGenerated(data, i, uint64(m.DaemonEndpoints.Size())) - n69, err := m.DaemonEndpoints.MarshalTo(data[i:]) + n71, err := m.DaemonEndpoints.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n69 + i += n71 data[i] = 0x3a i++ i = encodeVarintGenerated(data, i, uint64(m.NodeInfo.Size())) - n70, err := m.NodeInfo.MarshalTo(data[i:]) + n72, err := m.NodeInfo.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n70 + i += n72 if len(m.Images) > 0 { for _, msg := range m.Images { data[i] = 0x42 @@ -4193,20 +4237,20 @@ func (m *ObjectMeta) MarshalTo(data []byte) (int, error) { data[i] = 0x42 i++ i = encodeVarintGenerated(data, i, uint64(m.CreationTimestamp.Size())) - n71, err := m.CreationTimestamp.MarshalTo(data[i:]) + n73, err := m.CreationTimestamp.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n71 + i += n73 if m.DeletionTimestamp != nil { data[i] = 0x4a i++ i = encodeVarintGenerated(data, i, uint64(m.DeletionTimestamp.Size())) - n72, err := m.DeletionTimestamp.MarshalTo(data[i:]) + n74, err := m.DeletionTimestamp.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n72 + i += n74 } if m.DeletionGracePeriodSeconds != nil { data[i] = 0x50 @@ -4375,27 +4419,27 @@ func (m *PersistentVolume) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n73, err := m.ObjectMeta.MarshalTo(data[i:]) + n75, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n73 + i += n75 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n74, err := m.Spec.MarshalTo(data[i:]) + n76, err := m.Spec.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n74 + i += n76 data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n75, err := m.Status.MarshalTo(data[i:]) + n77, err := m.Status.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n75 + i += n77 return i, nil } @@ -4417,27 +4461,27 @@ func (m *PersistentVolumeClaim) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n76, err := m.ObjectMeta.MarshalTo(data[i:]) + n78, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n76 + i += n78 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n77, err := m.Spec.MarshalTo(data[i:]) + n79, err := m.Spec.MarshalTo(data[i:]) if 
err != nil { return 0, err } - i += n77 + i += n79 data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n78, err := m.Status.MarshalTo(data[i:]) + n80, err := m.Status.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n78 + i += n80 return i, nil } @@ -4459,11 +4503,11 @@ func (m *PersistentVolumeClaimList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n79, err := m.ListMeta.MarshalTo(data[i:]) + n81, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n79 + i += n81 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -4512,11 +4556,11 @@ func (m *PersistentVolumeClaimSpec) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Resources.Size())) - n80, err := m.Resources.MarshalTo(data[i:]) + n82, err := m.Resources.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n80 + i += n82 data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(len(m.VolumeName))) @@ -4573,11 +4617,11 @@ func (m *PersistentVolumeClaimStatus) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n81, err := (&v).MarshalTo(data[i:]) + n83, err := (&v).MarshalTo(data[i:]) if err != nil { return 0, err } - i += n81 + i += n83 } } return i, nil @@ -4631,11 +4675,11 @@ func (m *PersistentVolumeList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n82, err := m.ListMeta.MarshalTo(data[i:]) + n84, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n82 + i += n84 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -4670,131 +4714,131 @@ func (m *PersistentVolumeSource) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.GCEPersistentDisk.Size())) - n83, err := m.GCEPersistentDisk.MarshalTo(data[i:]) + n85, err := m.GCEPersistentDisk.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n83 + i += n85 } if m.AWSElasticBlockStore != nil { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.AWSElasticBlockStore.Size())) - n84, err := m.AWSElasticBlockStore.MarshalTo(data[i:]) + n86, err := m.AWSElasticBlockStore.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n84 + i += n86 } if m.HostPath != nil { data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.HostPath.Size())) - n85, err := m.HostPath.MarshalTo(data[i:]) + n87, err := m.HostPath.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n85 + i += n87 } if m.Glusterfs != nil { data[i] = 0x22 i++ i = encodeVarintGenerated(data, i, uint64(m.Glusterfs.Size())) - n86, err := m.Glusterfs.MarshalTo(data[i:]) + n88, err := m.Glusterfs.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n86 + i += n88 } if m.NFS != nil { data[i] = 0x2a i++ i = encodeVarintGenerated(data, i, uint64(m.NFS.Size())) - n87, err := m.NFS.MarshalTo(data[i:]) + n89, err := m.NFS.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n87 + i += n89 } if m.RBD != nil { data[i] = 0x32 i++ i = encodeVarintGenerated(data, i, uint64(m.RBD.Size())) - n88, err := m.RBD.MarshalTo(data[i:]) + n90, err := m.RBD.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n88 + i += n90 } if m.ISCSI != nil { data[i] = 0x3a i++ i = encodeVarintGenerated(data, i, uint64(m.ISCSI.Size())) - n89, err := m.ISCSI.MarshalTo(data[i:]) + n91, err 
:= m.ISCSI.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n89 + i += n91 } if m.Cinder != nil { data[i] = 0x42 i++ i = encodeVarintGenerated(data, i, uint64(m.Cinder.Size())) - n90, err := m.Cinder.MarshalTo(data[i:]) + n92, err := m.Cinder.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n90 + i += n92 } if m.CephFS != nil { data[i] = 0x4a i++ i = encodeVarintGenerated(data, i, uint64(m.CephFS.Size())) - n91, err := m.CephFS.MarshalTo(data[i:]) + n93, err := m.CephFS.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n91 + i += n93 } if m.FC != nil { data[i] = 0x52 i++ i = encodeVarintGenerated(data, i, uint64(m.FC.Size())) - n92, err := m.FC.MarshalTo(data[i:]) + n94, err := m.FC.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n92 + i += n94 } if m.Flocker != nil { data[i] = 0x5a i++ i = encodeVarintGenerated(data, i, uint64(m.Flocker.Size())) - n93, err := m.Flocker.MarshalTo(data[i:]) + n95, err := m.Flocker.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n93 + i += n95 } if m.FlexVolume != nil { data[i] = 0x62 i++ i = encodeVarintGenerated(data, i, uint64(m.FlexVolume.Size())) - n94, err := m.FlexVolume.MarshalTo(data[i:]) + n96, err := m.FlexVolume.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n94 + i += n96 } if m.AzureFile != nil { data[i] = 0x6a i++ i = encodeVarintGenerated(data, i, uint64(m.AzureFile.Size())) - n95, err := m.AzureFile.MarshalTo(data[i:]) + n97, err := m.AzureFile.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n95 + i += n97 } return i, nil } @@ -4829,21 +4873,21 @@ func (m *PersistentVolumeSpec) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n96, err := (&v).MarshalTo(data[i:]) + n98, err := (&v).MarshalTo(data[i:]) if err != nil { return 0, err } - i += n96 + i += n98 } } data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.PersistentVolumeSource.Size())) - n97, err := m.PersistentVolumeSource.MarshalTo(data[i:]) + n99, err := m.PersistentVolumeSource.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n97 + i += n99 if len(m.AccessModes) > 0 { for _, s := range m.AccessModes { data[i] = 0x1a @@ -4863,11 +4907,11 @@ func (m *PersistentVolumeSpec) MarshalTo(data []byte) (int, error) { data[i] = 0x22 i++ i = encodeVarintGenerated(data, i, uint64(m.ClaimRef.Size())) - n98, err := m.ClaimRef.MarshalTo(data[i:]) + n100, err := m.ClaimRef.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n98 + i += n100 } data[i] = 0x2a i++ @@ -4924,31 +4968,31 @@ func (m *Pod) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n99, err := m.ObjectMeta.MarshalTo(data[i:]) + n101, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n99 + i += n101 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n100, err := m.Spec.MarshalTo(data[i:]) + n102, err := m.Spec.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n100 + i += n102 data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n101, err := m.Status.MarshalTo(data[i:]) + n103, err := m.Status.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n101 + i += n103 return i, nil } -func (m *PodAttachOptions) Marshal() (data []byte, err error) { +func (m *PodAffinity) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) @@ -4958,51 +5002,39 @@ func (m 
*PodAttachOptions) Marshal() (data []byte, err error) { return data[:n], nil } -func (m *PodAttachOptions) MarshalTo(data []byte) (int, error) { +func (m *PodAffinity) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0x8 - i++ - if m.Stdin { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x10 - i++ - if m.Stdout { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x18 - i++ - if m.Stderr { - data[i] = 1 - } else { - data[i] = 0 + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, msg := range m.RequiredDuringSchedulingIgnoredDuringExecution { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } } - i++ - data[i] = 0x20 - i++ - if m.TTY { - data[i] = 1 - } else { - data[i] = 0 + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } } - i++ - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Container))) - i += copy(data[i:], m.Container) return i, nil } -func (m *PodCondition) Marshal() (data []byte, err error) { +func (m *PodAffinityTerm) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) n, err := m.MarshalTo(data) @@ -5012,35 +5044,178 @@ func (m *PodCondition) Marshal() (data []byte, err error) { return data[:n], nil } -func (m *PodCondition) MarshalTo(data []byte) (int, error) { +func (m *PodAffinityTerm) MarshalTo(data []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Status))) - i += copy(data[i:], m.Status) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastProbeTime.Size())) - n102, err := m.LastProbeTime.MarshalTo(data[i:]) - if err != nil { - return 0, err + if m.LabelSelector != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.LabelSelector.Size())) + n104, err := m.LabelSelector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n104 } - i += n102 - data[i] = 0x22 + if len(m.Namespaces) > 0 { + for _, s := range m.Namespaces { + data[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x1a i++ - i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) - n103, err := m.LastTransitionTime.MarshalTo(data[i:]) + i = encodeVarintGenerated(data, i, uint64(len(m.TopologyKey))) + i += copy(data[i:], m.TopologyKey) + return i, nil +} + +func (m *PodAntiAffinity) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) if err != nil { - return 0, err + return nil, err } - i += n103 + return data[:n], nil +} + +func (m *PodAntiAffinity) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, msg := range m.RequiredDuringSchedulingIgnoredDuringExecution { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + 
if err != nil { + return 0, err + } + i += n + } + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodAttachOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodAttachOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + if m.Stdin { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x10 + i++ + if m.Stdout { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x18 + i++ + if m.Stderr { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x20 + i++ + if m.TTY { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Container))) + i += copy(data[i:], m.Container) + return i, nil +} + +func (m *PodCondition) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodCondition) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Status))) + i += copy(data[i:], m.Status) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastProbeTime.Size())) + n105, err := m.LastProbeTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n105 + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) + n106, err := m.LastTransitionTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n106 data[i] = 0x2a i++ i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) @@ -5139,11 +5314,11 @@ func (m *PodList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n104, err := m.ListMeta.MarshalTo(data[i:]) + n107, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n104 + i += n107 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -5203,11 +5378,11 @@ func (m *PodLogOptions) MarshalTo(data []byte) (int, error) { data[i] = 0x2a i++ i = encodeVarintGenerated(data, i, uint64(m.SinceTime.Size())) - n105, err := m.SinceTime.MarshalTo(data[i:]) + n108, err := m.SinceTime.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n105 + i += n108 } data[i] = 0x30 i++ @@ -5271,11 +5446,11 @@ func (m *PodSecurityContext) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.SELinuxOptions.Size())) - n106, err := m.SELinuxOptions.MarshalTo(data[i:]) + n109, err := m.SELinuxOptions.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n106 + i += n109 } if m.RunAsUser != nil { data[i] = 0x10 @@ -5421,11 +5596,11 @@ func (m *PodSpec) MarshalTo(data []byte) (int, error) { data[i] = 0x72 i++ i = encodeVarintGenerated(data, i, uint64(m.SecurityContext.Size())) - n107, err := m.SecurityContext.MarshalTo(data[i:]) + n110, err := 
m.SecurityContext.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n107 + i += n110 } if len(m.ImagePullSecrets) > 0 { for _, msg := range m.ImagePullSecrets { @@ -5505,11 +5680,11 @@ func (m *PodStatus) MarshalTo(data []byte) (int, error) { data[i] = 0x3a i++ i = encodeVarintGenerated(data, i, uint64(m.StartTime.Size())) - n108, err := m.StartTime.MarshalTo(data[i:]) + n111, err := m.StartTime.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n108 + i += n111 } if len(m.ContainerStatuses) > 0 { for _, msg := range m.ContainerStatuses { @@ -5544,19 +5719,19 @@ func (m *PodStatusResult) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n109, err := m.ObjectMeta.MarshalTo(data[i:]) + n112, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n109 + i += n112 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n110, err := m.Status.MarshalTo(data[i:]) + n113, err := m.Status.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n110 + i += n113 return i, nil } @@ -5578,19 +5753,19 @@ func (m *PodTemplate) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n111, err := m.ObjectMeta.MarshalTo(data[i:]) + n114, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n111 + i += n114 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) - n112, err := m.Template.MarshalTo(data[i:]) + n115, err := m.Template.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n112 + i += n115 return i, nil } @@ -5612,11 +5787,11 @@ func (m *PodTemplateList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n113, err := m.ListMeta.MarshalTo(data[i:]) + n116, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n113 + i += n116 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -5650,19 +5825,19 @@ func (m *PodTemplateSpec) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n114, err := m.ObjectMeta.MarshalTo(data[i:]) + n117, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n114 + i += n117 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n115, err := m.Spec.MarshalTo(data[i:]) + n118, err := m.Spec.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n115 + i += n118 return i, nil } @@ -5711,11 +5886,11 @@ func (m *PreferredSchedulingTerm) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Preference.Size())) - n116, err := m.Preference.MarshalTo(data[i:]) + n119, err := m.Preference.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n116 + i += n119 return i, nil } @@ -5737,11 +5912,11 @@ func (m *Probe) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.Handler.Size())) - n117, err := m.Handler.MarshalTo(data[i:]) + n120, err := m.Handler.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n117 + i += n120 data[i] = 0x10 i++ i = encodeVarintGenerated(data, i, uint64(m.InitialDelaySeconds)) @@ -5814,11 +5989,11 @@ func (m *RBDVolumeSource) MarshalTo(data []byte) (int, error) { data[i] = 0x3a i++ i = encodeVarintGenerated(data, i, uint64(m.SecretRef.Size())) - n118, err := 
m.SecretRef.MarshalTo(data[i:]) + n121, err := m.SecretRef.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n118 + i += n121 } data[i] = 0x40 i++ @@ -5849,11 +6024,11 @@ func (m *RangeAllocation) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n119, err := m.ObjectMeta.MarshalTo(data[i:]) + n122, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n119 + i += n122 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(len(m.Range))) @@ -5885,27 +6060,27 @@ func (m *ReplicationController) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n120, err := m.ObjectMeta.MarshalTo(data[i:]) + n123, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n120 + i += n123 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n121, err := m.Spec.MarshalTo(data[i:]) + n124, err := m.Spec.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n121 + i += n124 data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n122, err := m.Status.MarshalTo(data[i:]) + n125, err := m.Status.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n122 + i += n125 return i, nil } @@ -5927,11 +6102,11 @@ func (m *ReplicationControllerList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n123, err := m.ListMeta.MarshalTo(data[i:]) + n126, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n123 + i += n126 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -5988,11 +6163,11 @@ func (m *ReplicationControllerSpec) MarshalTo(data []byte) (int, error) { data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) - n124, err := m.Template.MarshalTo(data[i:]) + n127, err := m.Template.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n124 + i += n127 } return i, nil } @@ -6042,27 +6217,27 @@ func (m *ResourceQuota) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n125, err := m.ObjectMeta.MarshalTo(data[i:]) + n128, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n125 + i += n128 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n126, err := m.Spec.MarshalTo(data[i:]) + n129, err := m.Spec.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n126 + i += n129 data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n127, err := m.Status.MarshalTo(data[i:]) + n130, err := m.Status.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n127 + i += n130 return i, nil } @@ -6084,11 +6259,11 @@ func (m *ResourceQuotaList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n128, err := m.ListMeta.MarshalTo(data[i:]) + n131, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n128 + i += n131 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -6134,11 +6309,11 @@ func (m *ResourceQuotaSpec) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n129, err := (&v).MarshalTo(data[i:]) + n132, err := (&v).MarshalTo(data[i:]) if err != nil { return 0, err } - i += n129 + i += n132 } } if 
len(m.Scopes) > 0 { @@ -6189,11 +6364,11 @@ func (m *ResourceQuotaStatus) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n130, err := (&v).MarshalTo(data[i:]) + n133, err := (&v).MarshalTo(data[i:]) if err != nil { return 0, err } - i += n130 + i += n133 } } if len(m.Used) > 0 { @@ -6211,11 +6386,11 @@ func (m *ResourceQuotaStatus) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n131, err := (&v).MarshalTo(data[i:]) + n134, err := (&v).MarshalTo(data[i:]) if err != nil { return 0, err } - i += n131 + i += n134 } } return i, nil @@ -6251,11 +6426,11 @@ func (m *ResourceRequirements) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n132, err := (&v).MarshalTo(data[i:]) + n135, err := (&v).MarshalTo(data[i:]) if err != nil { return 0, err } - i += n132 + i += n135 } } if len(m.Requests) > 0 { @@ -6273,11 +6448,11 @@ func (m *ResourceRequirements) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n133, err := (&v).MarshalTo(data[i:]) + n136, err := (&v).MarshalTo(data[i:]) if err != nil { return 0, err } - i += n133 + i += n136 } } return i, nil @@ -6335,11 +6510,11 @@ func (m *Secret) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n134, err := m.ObjectMeta.MarshalTo(data[i:]) + n137, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n134 + i += n137 if len(m.Data) > 0 { for k := range m.Data { data[i] = 0x12 @@ -6382,11 +6557,11 @@ func (m *SecretKeySelector) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size())) - n135, err := m.LocalObjectReference.MarshalTo(data[i:]) + n138, err := m.LocalObjectReference.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n135 + i += n138 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(len(m.Key))) @@ -6412,11 +6587,11 @@ func (m *SecretList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n136, err := m.ListMeta.MarshalTo(data[i:]) + n139, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n136 + i += n139 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -6473,11 +6648,11 @@ func (m *SecurityContext) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.Capabilities.Size())) - n137, err := m.Capabilities.MarshalTo(data[i:]) + n140, err := m.Capabilities.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n137 + i += n140 } if m.Privileged != nil { data[i] = 0x10 @@ -6493,11 +6668,11 @@ func (m *SecurityContext) MarshalTo(data []byte) (int, error) { data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.SELinuxOptions.Size())) - n138, err := m.SELinuxOptions.MarshalTo(data[i:]) + n141, err := m.SELinuxOptions.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n138 + i += n141 } if m.RunAsUser != nil { data[i] = 0x20 @@ -6545,11 +6720,11 @@ func (m *SerializedReference) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.Reference.Size())) - n139, err := m.Reference.MarshalTo(data[i:]) + n142, err := m.Reference.MarshalTo(data[i:]) if err != nil { return 0, err } - i += 
n139 + i += n142 return i, nil } @@ -6571,27 +6746,27 @@ func (m *Service) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n140, err := m.ObjectMeta.MarshalTo(data[i:]) + n143, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n140 + i += n143 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n141, err := m.Spec.MarshalTo(data[i:]) + n144, err := m.Spec.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n141 + i += n144 data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n142, err := m.Status.MarshalTo(data[i:]) + n145, err := m.Status.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n142 + i += n145 return i, nil } @@ -6613,11 +6788,11 @@ func (m *ServiceAccount) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n143, err := m.ObjectMeta.MarshalTo(data[i:]) + n146, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n143 + i += n146 if len(m.Secrets) > 0 { for _, msg := range m.Secrets { data[i] = 0x12 @@ -6663,11 +6838,11 @@ func (m *ServiceAccountList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n144, err := m.ListMeta.MarshalTo(data[i:]) + n147, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n144 + i += n147 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -6701,11 +6876,11 @@ func (m *ServiceList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n145, err := m.ListMeta.MarshalTo(data[i:]) + n148, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n145 + i += n148 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -6750,11 +6925,11 @@ func (m *ServicePort) MarshalTo(data []byte) (int, error) { data[i] = 0x22 i++ i = encodeVarintGenerated(data, i, uint64(m.TargetPort.Size())) - n146, err := m.TargetPort.MarshalTo(data[i:]) + n149, err := m.TargetPort.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n146 + i += n149 data[i] = 0x28 i++ i = encodeVarintGenerated(data, i, uint64(m.NodePort)) @@ -6894,11 +7069,11 @@ func (m *ServiceStatus) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.LoadBalancer.Size())) - n147, err := m.LoadBalancer.MarshalTo(data[i:]) + n150, err := m.LoadBalancer.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n147 + i += n150 return i, nil } @@ -6920,11 +7095,11 @@ func (m *TCPSocketAction) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.Port.Size())) - n148, err := m.Port.MarshalTo(data[i:]) + n151, err := m.Port.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n148 + i += n151 return i, nil } @@ -6950,11 +7125,11 @@ func (m *Volume) MarshalTo(data []byte) (int, error) { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.VolumeSource.Size())) - n149, err := m.VolumeSource.MarshalTo(data[i:]) + n152, err := m.VolumeSource.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n149 + i += n152 return i, nil } @@ -7011,151 +7186,151 @@ func (m *VolumeSource) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.HostPath.Size())) - n150, err := 
m.HostPath.MarshalTo(data[i:]) + n153, err := m.HostPath.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n150 + i += n153 } if m.EmptyDir != nil { data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(m.EmptyDir.Size())) - n151, err := m.EmptyDir.MarshalTo(data[i:]) + n154, err := m.EmptyDir.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n151 + i += n154 } if m.GCEPersistentDisk != nil { data[i] = 0x1a i++ i = encodeVarintGenerated(data, i, uint64(m.GCEPersistentDisk.Size())) - n152, err := m.GCEPersistentDisk.MarshalTo(data[i:]) + n155, err := m.GCEPersistentDisk.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n152 + i += n155 } if m.AWSElasticBlockStore != nil { data[i] = 0x22 i++ i = encodeVarintGenerated(data, i, uint64(m.AWSElasticBlockStore.Size())) - n153, err := m.AWSElasticBlockStore.MarshalTo(data[i:]) + n156, err := m.AWSElasticBlockStore.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n153 + i += n156 } if m.GitRepo != nil { data[i] = 0x2a i++ i = encodeVarintGenerated(data, i, uint64(m.GitRepo.Size())) - n154, err := m.GitRepo.MarshalTo(data[i:]) + n157, err := m.GitRepo.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n154 + i += n157 } if m.Secret != nil { data[i] = 0x32 i++ i = encodeVarintGenerated(data, i, uint64(m.Secret.Size())) - n155, err := m.Secret.MarshalTo(data[i:]) + n158, err := m.Secret.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n155 + i += n158 } if m.NFS != nil { data[i] = 0x3a i++ i = encodeVarintGenerated(data, i, uint64(m.NFS.Size())) - n156, err := m.NFS.MarshalTo(data[i:]) + n159, err := m.NFS.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n156 + i += n159 } if m.ISCSI != nil { data[i] = 0x42 i++ i = encodeVarintGenerated(data, i, uint64(m.ISCSI.Size())) - n157, err := m.ISCSI.MarshalTo(data[i:]) + n160, err := m.ISCSI.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n157 + i += n160 } if m.Glusterfs != nil { data[i] = 0x4a i++ i = encodeVarintGenerated(data, i, uint64(m.Glusterfs.Size())) - n158, err := m.Glusterfs.MarshalTo(data[i:]) + n161, err := m.Glusterfs.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n158 + i += n161 } if m.PersistentVolumeClaim != nil { data[i] = 0x52 i++ i = encodeVarintGenerated(data, i, uint64(m.PersistentVolumeClaim.Size())) - n159, err := m.PersistentVolumeClaim.MarshalTo(data[i:]) + n162, err := m.PersistentVolumeClaim.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n159 + i += n162 } if m.RBD != nil { data[i] = 0x5a i++ i = encodeVarintGenerated(data, i, uint64(m.RBD.Size())) - n160, err := m.RBD.MarshalTo(data[i:]) + n163, err := m.RBD.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n160 + i += n163 } if m.FlexVolume != nil { data[i] = 0x62 i++ i = encodeVarintGenerated(data, i, uint64(m.FlexVolume.Size())) - n161, err := m.FlexVolume.MarshalTo(data[i:]) + n164, err := m.FlexVolume.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n161 + i += n164 } if m.Cinder != nil { data[i] = 0x6a i++ i = encodeVarintGenerated(data, i, uint64(m.Cinder.Size())) - n162, err := m.Cinder.MarshalTo(data[i:]) + n165, err := m.Cinder.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n162 + i += n165 } if m.CephFS != nil { data[i] = 0x72 i++ i = encodeVarintGenerated(data, i, uint64(m.CephFS.Size())) - n163, err := m.CephFS.MarshalTo(data[i:]) + n166, err := m.CephFS.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n163 + i += n166 } if m.Flocker != nil { data[i] = 0x7a i++ i = 
encodeVarintGenerated(data, i, uint64(m.Flocker.Size())) - n164, err := m.Flocker.MarshalTo(data[i:]) + n167, err := m.Flocker.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n164 + i += n167 } if m.DownwardAPI != nil { data[i] = 0x82 @@ -7163,11 +7338,11 @@ func (m *VolumeSource) MarshalTo(data []byte) (int, error) { data[i] = 0x1 i++ i = encodeVarintGenerated(data, i, uint64(m.DownwardAPI.Size())) - n165, err := m.DownwardAPI.MarshalTo(data[i:]) + n168, err := m.DownwardAPI.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n165 + i += n168 } if m.FC != nil { data[i] = 0x8a @@ -7175,11 +7350,11 @@ func (m *VolumeSource) MarshalTo(data []byte) (int, error) { data[i] = 0x1 i++ i = encodeVarintGenerated(data, i, uint64(m.FC.Size())) - n166, err := m.FC.MarshalTo(data[i:]) + n169, err := m.FC.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n166 + i += n169 } if m.AzureFile != nil { data[i] = 0x92 @@ -7187,11 +7362,11 @@ func (m *VolumeSource) MarshalTo(data []byte) (int, error) { data[i] = 0x1 i++ i = encodeVarintGenerated(data, i, uint64(m.AzureFile.Size())) - n167, err := m.AzureFile.MarshalTo(data[i:]) + n170, err := m.AzureFile.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n167 + i += n170 } if m.ConfigMap != nil { data[i] = 0x9a @@ -7199,12 +7374,41 @@ func (m *VolumeSource) MarshalTo(data []byte) (int, error) { data[i] = 0x1 i++ i = encodeVarintGenerated(data, i, uint64(m.ConfigMap.Size())) - n168, err := m.ConfigMap.MarshalTo(data[i:]) + n171, err := m.ConfigMap.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n168 + i += n171 + } + return i, nil +} + +func (m *WeightedPodAffinityTerm) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *WeightedPodAffinityTerm) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Weight)) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.PodAffinityTerm.Size())) + n172, err := m.PodAffinityTerm.MarshalTo(data[i:]) + if err != nil { + return 0, err } + i += n172 return i, nil } @@ -7254,6 +7458,14 @@ func (m *Affinity) Size() (n int) { l = m.NodeAffinity.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.PodAffinity != nil { + l = m.PodAffinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PodAntiAffinity != nil { + l = m.PodAntiAffinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -8729,15 +8941,69 @@ func (m *Pod) Size() (n int) { return n } -func (m *PodAttachOptions) Size() (n int) { +func (m *PodAffinity) Size() (n int) { var l int _ = l - n += 2 - n += 2 - n += 2 - n += 2 - l = len(m.Container) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.RequiredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodAffinityTerm) Size() (n int) { + var l int + _ = l + if m.LabelSelector != nil { + l = m.LabelSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Namespaces) > 0 { + for _, s := range m.Namespaces { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) 
+ } + } + l = len(m.TopologyKey) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodAntiAffinity) Size() (n int) { + var l int + _ = l + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.RequiredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodAttachOptions) Size() (n int) { + var l int + _ = l + n += 2 + n += 2 + n += 2 + n += 2 + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -9545,6 +9811,15 @@ func (m *VolumeSource) Size() (n int) { return n } +func (m *WeightedPodAffinityTerm) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Weight)) + l = m.PodAffinityTerm.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func sovGenerated(x uint64) (n int) { for { n++ @@ -9767,6 +10042,72 @@ func (m *Affinity) Unmarshal(data []byte) error { return err } iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodAffinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodAffinity == nil { + m.PodAffinity = &PodAffinity{} + } + if err := m.PodAffinity.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodAntiAffinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodAntiAffinity == nil { + m.PodAntiAffinity = &PodAntiAffinity{} + } + if err := m.PodAntiAffinity.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(data[iNdEx:]) @@ -23848,19 +24189,357 @@ func (m *PersistentVolumeSpec) Unmarshal(data []byte) error { if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Capacity == nil { + m.Capacity = make(ResourceList) + } + m.Capacity[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= 
(int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PersistentVolumeSource.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AccessModes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(data[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClaimRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClaimRef == nil { + m.ClaimRef = &ObjectReference{} + } + if err := m.ClaimRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeReclaimPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimPolicy(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var 
stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = PersistentVolumePhase(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Pod) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Pod: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Pod: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } - iNdEx = postmsgIndex - if m.Capacity == nil { - m.Capacity = make(ResourceList) - } - m.Capacity[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 2: if wireType 
!= 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeSource", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23884,15 +24563,15 @@ func (m *PersistentVolumeSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.PersistentVolumeSource.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AccessModes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23902,24 +24581,75 @@ func (m *PersistentVolumeSpec) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(data[iNdEx:postIndex])) + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodAffinity) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodAffinity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodAffinity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClaimRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23943,18 +24673,16 @@ func (m *PersistentVolumeSpec) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ClaimRef == nil { - m.ClaimRef = &ObjectReference{} - } - if err := m.ClaimRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + m.RequiredDuringSchedulingIgnoredDuringExecution = append(m.RequiredDuringSchedulingIgnoredDuringExecution, PodAffinityTerm{}) + if err := m.RequiredDuringSchedulingIgnoredDuringExecution[len(m.RequiredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeReclaimPolicy", wireType) + return fmt.Errorf("proto: wrong 
wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23964,20 +24692,22 @@ func (m *PersistentVolumeSpec) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimPolicy(data[iNdEx:postIndex]) + m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, WeightedPodAffinityTerm{}) + if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -24000,7 +24730,7 @@ func (m *PersistentVolumeSpec) Unmarshal(data []byte) error { } return nil } -func (m *PersistentVolumeStatus) Unmarshal(data []byte) error { +func (m *PodAffinityTerm) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -24023,17 +24753,17 @@ func (m *PersistentVolumeStatus) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PersistentVolumeStatus: wiretype end group for non-group") + return fmt.Errorf("proto: PodAffinityTerm: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PersistentVolumeStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodAffinityTerm: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24043,24 +24773,28 @@ func (m *PersistentVolumeStatus) Unmarshal(data []byte) error { } b := data[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Phase = PersistentVolumePhase(data[iNdEx:postIndex]) + if m.LabelSelector == nil { + m.LabelSelector = &k8s_io_kubernetes_pkg_api_unversioned.LabelSelector{} + } + if err := m.LabelSelector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -24085,11 +24819,11 @@ func (m *PersistentVolumeStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(data[iNdEx:postIndex]) + m.Namespaces = append(m.Namespaces, string(data[iNdEx:postIndex])) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field Reason", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TopologyKey", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -24114,7 +24848,7 @@ func (m *PersistentVolumeStatus) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Reason = string(data[iNdEx:postIndex]) + m.TopologyKey = string(data[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -24137,7 +24871,7 @@ func (m *PersistentVolumeStatus) Unmarshal(data []byte) error { } return nil } -func (m *Pod) Unmarshal(data []byte) error { +func (m *PodAntiAffinity) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 for iNdEx < l { @@ -24160,15 +24894,15 @@ func (m *Pod) Unmarshal(data []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Pod: wiretype end group for non-group") + return fmt.Errorf("proto: PodAntiAffinity: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Pod: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PodAntiAffinity: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24192,43 +24926,14 @@ func (m *Pod) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + m.RequiredDuringSchedulingIgnoredDuringExecution = append(m.RequiredDuringSchedulingIgnoredDuringExecution, PodAffinityTerm{}) + if err := m.RequiredDuringSchedulingIgnoredDuringExecution[len(m.RequiredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24252,7 +24957,8 @@ func (m *Pod) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, WeightedPodAffinityTerm{}) + if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -32274,6 +32980,105 @@ func (m *VolumeSource) Unmarshal(data []byte) error { } return nil } +func (m *WeightedPodAffinityTerm) 
Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WeightedPodAffinityTerm: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WeightedPodAffinityTerm: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType) + } + m.Weight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Weight |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodAffinityTerm", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PodAffinityTerm.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipGenerated(data []byte) (n int, err error) { l := len(data) iNdEx := 0 diff --git a/pkg/api/v1/generated.proto b/pkg/api/v1/generated.proto index d0445758677a0..7a8294b31601c 100644 --- a/pkg/api/v1/generated.proto +++ b/pkg/api/v1/generated.proto @@ -59,11 +59,16 @@ message AWSElasticBlockStoreVolumeSource { optional bool readOnly = 4; } -// Affinity is a group of affinity scheduling rules, currently -// only node affinity, but in the future also inter-pod affinity. +// Affinity is a group of affinity scheduling rules. message Affinity { // Describes node affinity scheduling rules for the pod. optional NodeAffinity nodeAffinity = 1; + + // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + optional PodAffinity podAffinity = 2; + + // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + optional PodAntiAffinity podAntiAffinity = 3; } // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. @@ -1726,6 +1731,96 @@ message Pod { optional PodStatus status = 3; } +// Pod affinity is a group of inter pod affinity scheduling rules. +message PodAffinity { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. 
+ // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system will try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system may or may not try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + repeated PodAffinityTerm requiredDuringSchedulingIgnoredDuringExecution = 1; + + // The scheduler will prefer to schedule pods to nodes that satisfy + // the affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + // node(s) with the highest sum are the most preferred. + repeated WeightedPodAffinityTerm preferredDuringSchedulingIgnoredDuringExecution = 2; +} + +// Defines a set of pods (namely those matching the labelSelector +// relative to the given namespace(s)) that this pod should be +// co-located (affinity) or not co-located (anti-affinity) with, +// where co-located is defined as running on a node whose value of +// the label with key <topologyKey> matches that of any node on which +// a pod of the set of pods is running +message PodAffinityTerm { + // A label query over a set of resources, in this case pods. + optional k8s.io.kubernetes.pkg.api.unversioned.LabelSelector labelSelector = 1; + + // namespaces specifies which namespaces the labelSelector applies to (matches against); + // nil list means "this pod's namespace," empty list means "all namespaces" + // The json tag here is not "omitempty" since we need to distinguish nil and empty. + // See https://golang.org/pkg/encoding/json/#Marshal for more details. + repeated string namespaces = 2; + + // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + // the labelSelector in the specified namespaces, where co-located is defined as running on a node + // whose value of the label with key topologyKey matches that of any node on which any of the + // selected pods is running. + // For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies" + // ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); + // for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed. + optional string topologyKey = 3; +} + +// Pod anti affinity is a group of inter pod anti affinity scheduling rules.
+message PodAntiAffinity { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the anti-affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the anti-affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system will try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` + // If the anti-affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the anti-affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system may or may not try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + repeated PodAffinityTerm requiredDuringSchedulingIgnoredDuringExecution = 1; + + // The scheduler will prefer to schedule pods to nodes that satisfy + // the anti-affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling anti-affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + // node(s) with the highest sum are the most preferred. + repeated WeightedPodAffinityTerm preferredDuringSchedulingIgnoredDuringExecution = 2; +} + // PodAttachOptions is the query options to a Pod's remote attach call. // --- // TODO: merge w/ PodExecOptions below for stdin, stdout, etc @@ -2696,3 +2791,13 @@ message VolumeSource { optional ConfigMapVolumeSource configMap = 19; } +// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) +message WeightedPodAffinityTerm { + // weight associated with matching the corresponding podAffinityTerm, + // in the range 1-100. + optional int32 weight = 1; + + // Required. A pod affinity term, associated with the corresponding weight. 
+ optional PodAffinityTerm podAffinityTerm = 2; +} + diff --git a/pkg/api/v1/types.generated.go b/pkg/api/v1/types.generated.go index 0170d4dc5364e..54f98733c34b1 100644 --- a/pkg/api/v1/types.generated.go +++ b/pkg/api/v1/types.generated.go @@ -22015,13 +22015,15 @@ func (x *Affinity) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool + var yyq2 [3]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false yyq2[0] = x.NodeAffinity != nil + yyq2[1] = x.PodAffinity != nil + yyq2[2] = x.PodAntiAffinity != nil var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) + r.EncodeArrayStart(3) } else { yynn2 = 0 for _, b := range yyq2 { @@ -22055,6 +22057,52 @@ func (x *Affinity) CodecEncodeSelf(e *codec1978.Encoder) { } } } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.PodAffinity == nil { + r.EncodeNil() + } else { + x.PodAffinity.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podAffinity")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PodAffinity == nil { + r.EncodeNil() + } else { + x.PodAffinity.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.PodAntiAffinity == nil { + r.EncodeNil() + } else { + x.PodAntiAffinity.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podAntiAffinity")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PodAntiAffinity == nil { + r.EncodeNil() + } else { + x.PodAntiAffinity.CodecEncodeSelf(e) + } + } + } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { @@ -22127,6 +22175,28 @@ func (x *Affinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { } x.NodeAffinity.CodecDecodeSelf(d) } + case "podAffinity": + if r.TryDecodeAsNil() { + if x.PodAffinity != nil { + x.PodAffinity = nil + } + } else { + if x.PodAffinity == nil { + x.PodAffinity = new(PodAffinity) + } + x.PodAffinity.CodecDecodeSelf(d) + } + case "podAntiAffinity": + if r.TryDecodeAsNil() { + if x.PodAntiAffinity != nil { + x.PodAntiAffinity = nil + } + } else { + if x.PodAntiAffinity == nil { + x.PodAntiAffinity = new(PodAntiAffinity) + } + x.PodAntiAffinity.CodecDecodeSelf(d) + } default: z.DecStructFieldNotFound(-1, yys3) } // end switch yys3 @@ -22138,42 +22208,1105 @@ func (x *Affinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l } else { - yyb5 = r.CheckBreak() + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.NodeAffinity != nil { + x.NodeAffinity = nil + } + } else { + if x.NodeAffinity == nil { + x.NodeAffinity = new(NodeAffinity) + } + x.NodeAffinity.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PodAffinity != nil { + x.PodAffinity = nil + } + } else { + if x.PodAffinity == nil { + x.PodAffinity = new(PodAffinity) + } + x.PodAffinity.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PodAntiAffinity != nil { + x.PodAntiAffinity = nil + } + } else { + if x.PodAntiAffinity == nil { + x.PodAntiAffinity = new(PodAntiAffinity) + } + x.PodAntiAffinity.CodecDecodeSelf(d) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodAffinity) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.RequiredDuringSchedulingIgnoredDuringExecution) != 0 + yyq2[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("preferredDuringSchedulingIgnoredDuringExecution")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + 
h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodAffinity) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "requiredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv4 := &x.RequiredDuringSchedulingIgnoredDuringExecution + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv4), d) + } + } + case "preferredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv6 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv6), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv9 := &x.RequiredDuringSchedulingIgnoredDuringExecution + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv9), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + 
x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv11 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv11), d) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodAntiAffinity) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.RequiredDuringSchedulingIgnoredDuringExecution) != 0 + yyq2[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("preferredDuringSchedulingIgnoredDuringExecution")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodAntiAffinity) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if 
z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodAntiAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "requiredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv4 := &x.RequiredDuringSchedulingIgnoredDuringExecution + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv4), d) + } + } + case "preferredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv6 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv6), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodAntiAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv9 := &x.RequiredDuringSchedulingIgnoredDuringExecution + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv9), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv11 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv11), d) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) 
+} + +func (x *WeightedPodAffinityTerm) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Weight)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("weight")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Weight)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.PodAffinityTerm + yy7.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podAffinityTerm")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.PodAffinityTerm + yy9.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *WeightedPodAffinityTerm) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *WeightedPodAffinityTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "weight": + if r.TryDecodeAsNil() { + x.Weight = 0 + } else { + x.Weight = int32(r.DecodeInt(32)) + } + case "podAffinityTerm": + if r.TryDecodeAsNil() { + x.PodAffinityTerm = PodAffinityTerm{} + } else { + yyv5 := &x.PodAffinityTerm + yyv5.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *WeightedPodAffinityTerm) 
codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Weight = 0 + } else { + x.Weight = int32(r.DecodeInt(32)) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PodAffinityTerm = PodAffinityTerm{} + } else { + yyv8 := &x.PodAffinityTerm + yyv8.CodecDecodeSelf(d) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodAffinityTerm) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.LabelSelector != nil + yyq2[2] = x.TopologyKey != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.LabelSelector == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(x.LabelSelector) { + } else { + z.EncFallback(x.LabelSelector) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("labelSelector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.LabelSelector == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(x.LabelSelector) { + } else { + z.EncFallback(x.LabelSelector) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Namespaces == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + z.F.EncSliceStringV(x.Namespaces, false, e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("namespaces")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Namespaces == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + z.F.EncSliceStringV(x.Namespaces, false, e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.TopologyKey)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } 
else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("topologyKey")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.TopologyKey)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodAffinityTerm) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodAffinityTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "labelSelector": + if r.TryDecodeAsNil() { + if x.LabelSelector != nil { + x.LabelSelector = nil + } + } else { + if x.LabelSelector == nil { + x.LabelSelector = new(pkg2_unversioned.LabelSelector) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(x.LabelSelector) { + } else { + z.DecFallback(x.LabelSelector, false) + } + } + case "namespaces": + if r.TryDecodeAsNil() { + x.Namespaces = nil + } else { + yyv6 := &x.Namespaces + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + z.F.DecSliceStringX(yyv6, false, d) + } + } + case "topologyKey": + if r.TryDecodeAsNil() { + x.TopologyKey = "" + } else { + x.TopologyKey = string(r.DecodeString()) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodAffinityTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.LabelSelector != nil { + x.LabelSelector = nil + } + } else { + if x.LabelSelector == nil { + x.LabelSelector = new(pkg2_unversioned.LabelSelector) + } + yym11 := z.DecBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.DecExt(x.LabelSelector) { + } else { + z.DecFallback(x.LabelSelector, 
false) + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() } - if yyb5 { + if yyb9 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } z.DecSendContainerState(codecSelfer_containerArrayElem1234) if r.TryDecodeAsNil() { - if x.NodeAffinity != nil { - x.NodeAffinity = nil - } + x.Namespaces = nil } else { - if x.NodeAffinity == nil { - x.NodeAffinity = new(NodeAffinity) + yyv12 := &x.Namespaces + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + z.F.DecSliceStringX(yyv12, false, d) } - x.NodeAffinity.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TopologyKey = "" + } else { + x.TopologyKey = string(r.DecodeString()) } for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l } else { - yyb5 = r.CheckBreak() + yyb9 = r.CheckBreak() } - if yyb5 { + if yyb9 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") + z.DecStructFieldNotFound(yyj9-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -51529,6 +52662,244 @@ func (x codecSelfer1234) decSliceNodeSelectorRequirement(v *[]NodeSelectorRequir } } +func (x codecSelfer1234) encSlicePodAffinityTerm(v []PodAffinityTerm, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePodAffinityTerm(v *[]PodAffinityTerm, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PodAffinityTerm{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 48) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PodAffinityTerm, yyrl1) + } + } else { + yyv1 = make([]PodAffinityTerm, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodAffinityTerm{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PodAffinityTerm{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodAffinityTerm{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PodAffinityTerm{}) // var yyz1 PodAffinityTerm + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodAffinityTerm{} + } else { + yyv4 := 
&yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PodAffinityTerm{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceWeightedPodAffinityTerm(v []WeightedPodAffinityTerm, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceWeightedPodAffinityTerm(v *[]WeightedPodAffinityTerm, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []WeightedPodAffinityTerm{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]WeightedPodAffinityTerm, yyrl1) + } + } else { + yyv1 = make([]WeightedPodAffinityTerm, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = WeightedPodAffinityTerm{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, WeightedPodAffinityTerm{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = WeightedPodAffinityTerm{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, WeightedPodAffinityTerm{}) // var yyz1 WeightedPodAffinityTerm + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = WeightedPodAffinityTerm{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []WeightedPodAffinityTerm{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + func (x codecSelfer1234) encSlicePreferredSchedulingTerm(v []PreferredSchedulingTerm, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) diff --git a/pkg/api/v1/types.go b/pkg/api/v1/types.go index 93416b587f062..d1fe5b2095a57 100644 --- a/pkg/api/v1/types.go +++ b/pkg/api/v1/types.go @@ -1394,11 +1394,109 @@ const ( NodeSelectorOpLt NodeSelectorOperator = "Lt" ) -// Affinity is a group of affinity scheduling rules, currently -// only node affinity, but in the future also inter-pod affinity. +// Affinity is a group of affinity scheduling rules. type Affinity struct { // Describes node affinity scheduling rules for the pod. 
NodeAffinity *NodeAffinity `json:"nodeAffinity,omitempty" protobuf:"bytes,1,opt,name=nodeAffinity"` + // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + PodAffinity *PodAffinity `json:"podAffinity,omitempty" protobuf:"bytes,2,opt,name=podAffinity"` + // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + PodAntiAffinity *PodAntiAffinity `json:"podAntiAffinity,omitempty" protobuf:"bytes,3,opt,name=podAntiAffinity"` +} + +// Pod affinity is a group of inter pod affinity scheduling rules. +type PodAffinity struct { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system will try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system may or may not try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"` + // The scheduler will prefer to schedule pods to nodes that satisfy + // the affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + // node(s) with the highest sum are the most preferred. + PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"` +} + +// Pod anti affinity is a group of inter pod anti affinity scheduling rules. +type PodAntiAffinity struct { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the anti-affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the anti-affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system will try to eventually evict the pod from its node. 
+ // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` + // If the anti-affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the anti-affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system may or may not try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"` + // The scheduler will prefer to schedule pods to nodes that satisfy + // the anti-affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling anti-affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + // node(s) with the highest sum are the most preferred. + PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"` +} + +// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) +type WeightedPodAffinityTerm struct { + // weight associated with matching the corresponding podAffinityTerm, + // in the range 1-100. + Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"` + // Required. A pod affinity term, associated with the corresponding weight. + PodAffinityTerm PodAffinityTerm `json:"podAffinityTerm" protobuf:"bytes,2,opt,name=podAffinityTerm"` +} + +// Defines a set of pods (namely those matching the labelSelector +// relative to the given namespace(s)) that this pod should be +// co-located (affinity) or not co-located (anti-affinity) with, +// where co-located is defined as running on a node whose value of +// the label with key <topologyKey> matches that of any node on which +// a pod of the set of pods is running +type PodAffinityTerm struct { + // A label query over a set of resources, in this case pods. + LabelSelector *unversioned.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"` + // namespaces specifies which namespaces the labelSelector applies to (matches against); + // nil list means "this pod's namespace," empty list means "all namespaces" + // The json tag here is not "omitempty" since we need to distinguish nil and empty. + // See https://golang.org/pkg/encoding/json/#Marshal for more details.
+ Namespaces []string `json:"namespaces" protobuf:"bytes,2,rep,name=namespaces"` + // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + // the labelSelector in the specified namespaces, where co-located is defined as running on a node + // whose value of the label with key topologyKey matches that of any node on which any of the + // selected pods is running. + // For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies" + // ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); + // for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed. + TopologyKey string `json:"topologyKey,omitempty" protobuf:"bytes,3,opt,name=topologyKey"` } // Node affinity is a group of node affinity scheduling rules. diff --git a/pkg/api/v1/types_swagger_doc_generated.go b/pkg/api/v1/types_swagger_doc_generated.go index 1441bffe36237..190317bd22f83 100644 --- a/pkg/api/v1/types_swagger_doc_generated.go +++ b/pkg/api/v1/types_swagger_doc_generated.go @@ -40,8 +40,10 @@ func (AWSElasticBlockStoreVolumeSource) SwaggerDoc() map[string]string { } var map_Affinity = map[string]string{ - "": "Affinity is a group of affinity scheduling rules, currently only node affinity, but in the future also inter-pod affinity.", - "nodeAffinity": "Describes node affinity scheduling rules for the pod.", + "": "Affinity is a group of affinity scheduling rules.", + "nodeAffinity": "Describes node affinity scheduling rules for the pod.", + "podAffinity": "Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).", + "podAntiAffinity": "Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).", } func (Affinity) SwaggerDoc() map[string]string { @@ -1086,6 +1088,37 @@ func (Pod) SwaggerDoc() map[string]string { return map_Pod } +var map_PodAffinity = map[string]string{ + "": "Pod affinity is a group of inter pod affinity scheduling rules.", + "requiredDuringSchedulingIgnoredDuringExecution": "NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system will try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:\"requiredDuringSchedulingRequiredDuringExecution,omitempty\"` If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. 
all terms must be satisfied.", + "preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", +} + +func (PodAffinity) SwaggerDoc() map[string]string { + return map_PodAffinity +} + +var map_PodAffinityTerm = map[string]string{ + "": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running", + "labelSelector": "A label query over a set of resources, in this case pods.", + "namespaces": "namespaces specifies which namespaces the labelSelector applies to (matches against); nil list means \"this pod's namespace,\" empty list means \"all namespaces\" The json tag here is not \"omitempty\" since we need to distinguish nil and empty. See https://golang.org/pkg/encoding/json/#Marshal for more details.", + "topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as \"all topologies\" (\"all topologies\" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed.", +} + +func (PodAffinityTerm) SwaggerDoc() map[string]string { + return map_PodAffinityTerm +} + +var map_PodAntiAffinity = map[string]string{ + "": "Pod anti affinity is a group of inter pod anti affinity scheduling rules.", + "requiredDuringSchedulingIgnoredDuringExecution": "NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system will try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:\"requiredDuringSchedulingRequiredDuringExecution,omitempty\"` If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g.
due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", + "preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", +} + +func (PodAntiAffinity) SwaggerDoc() map[string]string { + return map_PodAntiAffinity +} + var map_PodAttachOptions = map[string]string{ "": "PodAttachOptions is the query options to a Pod's remote attach call.", "stdin": "Stdin if true, redirects the standard input stream of the pod for this call. Defaults to false.", @@ -1628,4 +1661,14 @@ func (VolumeSource) SwaggerDoc() map[string]string { return map_VolumeSource } +var map_WeightedPodAffinityTerm = map[string]string{ + "": "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)", + "weight": "weight associated with matching the corresponding podAffinityTerm, in the range 1-100.", + "podAffinityTerm": "Required. A pod affinity term, associated with the corresponding weight.", +} + +func (WeightedPodAffinityTerm) SwaggerDoc() map[string]string { + return map_WeightedPodAffinityTerm +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/pkg/api/validation/validation.go b/pkg/api/validation/validation.go index 95d729cb2a595..7d2ac76751a20 100644 --- a/pkg/api/validation/validation.go +++ b/pkg/api/validation/validation.go @@ -34,6 +34,7 @@ import ( "k8s.io/kubernetes/pkg/api/resource" apiservice "k8s.io/kubernetes/pkg/api/service" "k8s.io/kubernetes/pkg/api/unversioned" + unversionedvalidation "k8s.io/kubernetes/pkg/api/unversioned/validation" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/capabilities" "k8s.io/kubernetes/pkg/labels" @@ -74,26 +75,6 @@ var BannedOwners = map[unversioned.GroupVersionKind]struct{}{ v1.SchemeGroupVersion.WithKind("Event"): {}, } -func ValidateLabelName(labelName string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if !validation.IsQualifiedName(labelName) { - allErrs = append(allErrs, field.Invalid(fldPath, labelName, qualifiedNameErrorMsg)) - } - return allErrs -} - -// ValidateLabels validates that a set of labels are correctly defined. -func ValidateLabels(labels map[string]string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for k, v := range labels { - allErrs = append(allErrs, ValidateLabelName(k, fldPath)...) 
- if !validation.IsValidLabelValue(v) { - allErrs = append(allErrs, field.Invalid(fldPath, v, labelValueErrorMsg)) - } - } - return allErrs -} - // ValidateHasLabel requires that api.ObjectMeta has a Label with key and expectedValue func ValidateHasLabel(meta api.ObjectMeta, fldPath *field.Path, key, expectedValue string) field.ErrorList { allErrs := field.ErrorList{} @@ -361,7 +342,7 @@ func ValidateObjectMeta(meta *api.ObjectMeta, requiresNamespace bool, nameFn Val } } allErrs = append(allErrs, ValidateNonnegativeField(meta.Generation, fldPath.Child("generation"))...) - allErrs = append(allErrs, ValidateLabels(meta.Labels, fldPath.Child("labels"))...) + allErrs = append(allErrs, unversionedvalidation.ValidateLabels(meta.Labels, fldPath.Child("labels"))...) allErrs = append(allErrs, ValidateAnnotations(meta.Annotations, fldPath.Child("annotations"))...) allErrs = append(allErrs, ValidateOwnerReferences(meta.OwnerReferences, fldPath.Child("ownerReferences"))...) @@ -416,7 +397,7 @@ func ValidateObjectMetaUpdate(newMeta, oldMeta *api.ObjectMeta, fldPath *field.P allErrs = append(allErrs, ValidateImmutableField(newMeta.CreationTimestamp, oldMeta.CreationTimestamp, fldPath.Child("creationTimestamp"))...) allErrs = append(allErrs, ValidateImmutableField(newMeta.Finalizers, oldMeta.Finalizers, fldPath.Child("finalizers"))...) - allErrs = append(allErrs, ValidateLabels(newMeta.Labels, fldPath.Child("labels"))...) + allErrs = append(allErrs, unversionedvalidation.ValidateLabels(newMeta.Labels, fldPath.Child("labels"))...) allErrs = append(allErrs, ValidateAnnotations(newMeta.Annotations, fldPath.Child("annotations"))...) allErrs = append(allErrs, ValidateOwnerReferences(newMeta.OwnerReferences, fldPath.Child("ownerReferences"))...) @@ -1448,7 +1429,7 @@ func ValidatePodSpec(spec *api.PodSpec, fldPath *field.Path) field.ErrorList { allErrs = append(allErrs, validateContainers(spec.Containers, allVolumes, fldPath.Child("containers"))...) allErrs = append(allErrs, validateRestartPolicy(&spec.RestartPolicy, fldPath.Child("restartPolicy"))...) allErrs = append(allErrs, validateDNSPolicy(&spec.DNSPolicy, fldPath.Child("dnsPolicy"))...) - allErrs = append(allErrs, ValidateLabels(spec.NodeSelector, fldPath.Child("nodeSelector"))...) + allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.NodeSelector, fldPath.Child("nodeSelector"))...) allErrs = append(allErrs, ValidatePodSecurityContext(spec.SecurityContext, spec, fldPath, fldPath.Child("securityContext"))...) allErrs = append(allErrs, validateImagePullSecrets(spec.ImagePullSecrets, fldPath.Child("imagePullSecrets"))...) if len(spec.ServiceAccountName) > 0 { @@ -1500,7 +1481,7 @@ func ValidateNodeSelectorRequirement(rq api.NodeSelectorRequirement, fldPath *fi default: allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), rq.Operator, "not a valid selector operator")) } - allErrs = append(allErrs, ValidateLabelName(rq.Key, fldPath.Child("key"))...) + allErrs = append(allErrs, unversionedvalidation.ValidateLabelName(rq.Key, fldPath.Child("key"))...) 
return allErrs } @@ -1547,6 +1528,87 @@ func ValidatePreferredSchedulingTerms(terms []api.PreferredSchedulingTerm, fldPa return allErrs } +// validatePodAffinityTerm tests that the specified podAffinityTerm fields have valid data +func validatePodAffinityTerm(podAffinityTerm api.PodAffinityTerm, allowEmptyTopologyKey bool, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(podAffinityTerm.LabelSelector, fldPath.Child("matchExpressions"))...) + for _, name := range podAffinityTerm.Namespaces { + if ok, _ := ValidateNamespaceName(name, false); !ok { + allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), name, DNS1123LabelErrorMsg)) + } + } + if !allowEmptyTopologyKey && len(podAffinityTerm.TopologyKey) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("topologyKey"), "can only be empty for PreferredDuringScheduling pod anti affinity")) + } + if len(podAffinityTerm.TopologyKey) != 0 { + allErrs = append(allErrs, unversionedvalidation.ValidateLabelName(podAffinityTerm.TopologyKey, fldPath.Child("topologyKey"))...) + } + return allErrs +} + +// validatePodAffinityTerms tests that the specified podAffinityTerms fields have valid data +func validatePodAffinityTerms(podAffinityTerms []api.PodAffinityTerm, allowEmptyTopologyKey bool, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for i, podAffinityTerm := range podAffinityTerms { + allErrs = append(allErrs, validatePodAffinityTerm(podAffinityTerm, allowEmptyTopologyKey, fldPath.Index(i))...) + } + return allErrs +} + +// validateWeightedPodAffinityTerms tests that the specified weightedPodAffinityTerms fields have valid data +func validateWeightedPodAffinityTerms(weightedPodAffinityTerms []api.WeightedPodAffinityTerm, allowEmptyTopologyKey bool, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for j, weightedTerm := range weightedPodAffinityTerms { + if weightedTerm.Weight <= 0 || weightedTerm.Weight > 100 { + allErrs = append(allErrs, field.Invalid(fldPath.Index(j).Child("weight"), weightedTerm.Weight, "must be in the range 1-100")) + } + allErrs = append(allErrs, validatePodAffinityTerm(weightedTerm.PodAffinityTerm, allowEmptyTopologyKey, fldPath.Index(j).Child("podAffinityTerm"))...) + } + return allErrs +} + +// validatePodAntiAffinity tests that the specified podAntiAffinity fields have valid data +func validatePodAntiAffinity(podAntiAffinity *api.PodAntiAffinity, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + // TODO:Uncomment below code once RequiredDuringSchedulingRequiredDuringExecution is implemented. + // if podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil { + // allErrs = append(allErrs, validatePodAffinityTerms(podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution, false, + // fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...) + //} + if podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil { + // empty topologyKey is not allowed for hard pod anti-affinity + allErrs = append(allErrs, validatePodAffinityTerms(podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, false, + fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...) 
+ } + if podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil { + // empty topologyKey is allowed for soft pod anti-affinity + allErrs = append(allErrs, validateWeightedPodAffinityTerms(podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution, true, + fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...) + } + return allErrs +} + +// validatePodAffinity tests that the specified podAffinity fields have valid data +func validatePodAffinity(podAffinity *api.PodAffinity, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + // TODO:Uncomment below code once RequiredDuringSchedulingRequiredDuringExecution is implemented. + // if podAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil { + // allErrs = append(allErrs, validatePodAffinityTerms(podAffinity.RequiredDuringSchedulingRequiredDuringExecution, false, + // fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...) + //} + if podAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil { + // empty topologyKey is not allowed for hard pod affinity + allErrs = append(allErrs, validatePodAffinityTerms(podAffinity.RequiredDuringSchedulingIgnoredDuringExecution, false, + fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...) + } + if podAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil { + // empty topologyKey is not allowed for soft pod affinity + allErrs = append(allErrs, validateWeightedPodAffinityTerms(podAffinity.PreferredDuringSchedulingIgnoredDuringExecution, false, + fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...) + } + return allErrs +} + // ValidateAffinityInPodAnnotations tests that the serialized Affinity in Pod.Annotations has valid data func ValidateAffinityInPodAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} @@ -1557,23 +1619,29 @@ func ValidateAffinityInPodAnnotations(annotations map[string]string, fldPath *fi return allErrs } + affinityFldPath := fldPath.Child(api.AffinityAnnotationKey) if affinity.NodeAffinity != nil { na := affinity.NodeAffinity - + naFldPath := affinityFldPath.Child("nodeAffinity") // TODO: Uncomment the next three lines once RequiredDuringSchedulingRequiredDuringExecution is implemented. // if na.RequiredDuringSchedulingRequiredDuringExecution != nil { - // allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingRequiredDuringExecution, fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...) + // allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingRequiredDuringExecution, naFldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...) // } if na.RequiredDuringSchedulingIgnoredDuringExecution != nil { - allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingIgnoredDuringExecution, fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...) + allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingIgnoredDuringExecution, naFldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...) } if len(na.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - allErrs = append(allErrs, ValidatePreferredSchedulingTerms(na.PreferredDuringSchedulingIgnoredDuringExecution, fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...) 
- + allErrs = append(allErrs, ValidatePreferredSchedulingTerms(na.PreferredDuringSchedulingIgnoredDuringExecution, naFldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...) } } + if affinity.PodAffinity != nil { + allErrs = append(allErrs, validatePodAffinity(affinity.PodAffinity, affinityFldPath.Child("podAffinity"))...) + } + if affinity.PodAntiAffinity != nil { + allErrs = append(allErrs, validatePodAntiAffinity(affinity.PodAntiAffinity, affinityFldPath.Child("podAntiAffinity"))...) + } return allErrs } @@ -1749,7 +1817,7 @@ func ValidateService(service *api.Service) field.ErrorList { } if service.Spec.Selector != nil { - allErrs = append(allErrs, ValidateLabels(service.Spec.Selector, specPath.Child("selector"))...) + allErrs = append(allErrs, unversionedvalidation.ValidateLabels(service.Spec.Selector, specPath.Child("selector"))...) } if len(service.Spec.SessionAffinity) == 0 { @@ -1968,7 +2036,7 @@ func ValidateReplicationControllerSpec(spec *api.ReplicationControllerSpec, fldP // ValidatePodTemplateSpec validates the spec of a pod template func ValidatePodTemplateSpec(spec *api.PodTemplateSpec, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - allErrs = append(allErrs, ValidateLabels(spec.Labels, fldPath.Child("labels"))...) + allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.Labels, fldPath.Child("labels"))...) allErrs = append(allErrs, ValidateAnnotations(spec.Annotations, fldPath.Child("annotations"))...) allErrs = append(allErrs, ValidatePodSpecificAnnotations(spec.Annotations, fldPath.Child("annotations"))...) allErrs = append(allErrs, ValidatePodSpec(&spec.Spec, fldPath.Child("spec"))...) diff --git a/pkg/api/validation/validation_test.go b/pkg/api/validation/validation_test.go index 60d35ec0b426b..c7682cf78e4c9 100644 --- a/pkg/api/validation/validation_test.go +++ b/pkg/api/validation/validation_test.go @@ -282,67 +282,6 @@ func TestValidateObjectMetaUpdateDisallowsUpdatingFinalizers(t *testing.T) { } } -func TestValidateLabels(t *testing.T) { - successCases := []map[string]string{ - {"simple": "bar"}, - {"now-with-dashes": "bar"}, - {"1-starts-with-num": "bar"}, - {"1234": "bar"}, - {"simple/simple": "bar"}, - {"now-with-dashes/simple": "bar"}, - {"now-with-dashes/now-with-dashes": "bar"}, - {"now.with.dots/simple": "bar"}, - {"now-with.dashes-and.dots/simple": "bar"}, - {"1-num.2-num/3-num": "bar"}, - {"1234/5678": "bar"}, - {"1.2.3.4/5678": "bar"}, - {"UpperCaseAreOK123": "bar"}, - {"goodvalue": "123_-.BaR"}, - } - for i := range successCases { - errs := ValidateLabels(successCases[i], field.NewPath("field")) - if len(errs) != 0 { - t.Errorf("case[%d] expected success, got %#v", i, errs) - } - } - - labelNameErrorCases := []map[string]string{ - {"nospecialchars^=@": "bar"}, - {"cantendwithadash-": "bar"}, - {"only/one/slash": "bar"}, - {strings.Repeat("a", 254): "bar"}, - } - for i := range labelNameErrorCases { - errs := ValidateLabels(labelNameErrorCases[i], field.NewPath("field")) - if len(errs) != 1 { - t.Errorf("case[%d] expected failure", i) - } else { - detail := errs[0].Detail - if detail != qualifiedNameErrorMsg { - t.Errorf("error detail %s should be equal %s", detail, qualifiedNameErrorMsg) - } - } - } - - labelValueErrorCases := []map[string]string{ - {"toolongvalue": strings.Repeat("a", 64)}, - {"backslashesinvalue": "some\\bad\\value"}, - {"nocommasallowed": "bad,value"}, - {"strangecharsinvalue": "?#$notsogood"}, - } - for i := range labelValueErrorCases { - errs := ValidateLabels(labelValueErrorCases[i], 
field.NewPath("field")) - if len(errs) != 1 { - t.Errorf("case[%d] expected failure", i) - } else { - detail := errs[0].Detail - if detail != labelValueErrorMsg { - t.Errorf("error detail %s should be equal %s", detail, labelValueErrorMsg) - } - } - } -} - func TestValidateAnnotations(t *testing.T) { successCases := []map[string]string{ {"simple": "bar"}, @@ -1969,20 +1908,74 @@ func TestValidatePod(t *testing.T) { NodeName: "foobar", }, }, + } + for _, pod := range successCases { + if errs := ValidatePod(&pod); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + } + + errorCases := map[string]api.Pod{ + "bad name": { + ObjectMeta: api.ObjectMeta{Name: "", Namespace: "ns"}, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + }, + }, + "bad namespace": { + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: ""}, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + }, + }, + "bad spec": { + ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "ns"}, + Spec: api.PodSpec{ + Containers: []api.Container{{}}, + }, + }, + "bad label": { + ObjectMeta: api.ObjectMeta{ + Name: "abc", + Namespace: "ns", + Labels: map[string]string{ + "NoUppercaseOrSpecialCharsLike=Equals": "bar", + }, + }, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + }, + }, + } + for k, v := range errorCases { + if errs := ValidatePod(&v); len(errs) == 0 { + t.Errorf("expected failure for %q", k) + } + } +} + +func TestValidateAffinity(t *testing.T) { + successCases := []api.Pod{ { // Serialized affinity requirements in annotations. ObjectMeta: api.ObjectMeta{ Name: "123", Namespace: "ns", // TODO: Uncomment and move this block into Annotations map once // RequiredDuringSchedulingRequiredDuringExecution is implemented - // "requiredDuringSchedulingRequiredDuringExecution": { - // "nodeSelectorTerms": [{ - // "matchExpressions": [{ - // "key": "key1", - // "operator": "Exists" - // }] - // }] - // }, + // "requiredDuringSchedulingRequiredDuringExecution": { + // "nodeSelectorTerms": [{ + // "matchExpressions": [{ + // "key": "key1", + // "operator": "Exists" + // }] + // }] + // }, Annotations: map[string]string{ api.AffinityAnnotationKey: ` {"nodeAffinity": { @@ -2015,51 +2008,121 @@ func TestValidatePod(t *testing.T) { DNSPolicy: api.DNSClusterFirst, }, }, - } - for _, pod := range successCases { - if errs := ValidatePod(&pod); len(errs) != 0 { - t.Errorf("expected success: %v", errs) - } - } - - errorCases := map[string]api.Pod{ - "bad name": { - ObjectMeta: api.ObjectMeta{Name: "", Namespace: "ns"}, - Spec: api.PodSpec{ - RestartPolicy: api.RestartPolicyAlways, - DNSPolicy: api.DNSClusterFirst, - Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + { // Serialized pod affinity in affinity requirements in annotations. 
+ ObjectMeta: api.ObjectMeta{ + Name: "123", + Namespace: "ns", + // TODO: Uncomment and move this block into Annotations map once + // RequiredDuringSchedulingRequiredDuringExecution is implemented + // "requiredDuringSchedulingRequiredDuringExecution": [{ + // "labelSelector": { + // "matchExpressions": [{ + // "key": "key2", + // "operator": "In", + // "values": ["value1", "value2"] + // }] + // }, + // "namespaces":["ns"], + // "topologyKey": "zone" + // }] + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "key2", + "operator": "In", + "values": ["value1", "value2"] + }] + }, + "topologyKey": "zone", + "namespaces": ["ns"] + }], + "preferredDuringSchedulingIgnoredDuringExecution": [{ + "weight": 10, + "podAffinityTerm": { + "labelSelector": { + "matchExpressions": [{ + "key": "key2", + "operator": "NotIn", + "values": ["value1", "value2"] + }] + }, + "namespaces": ["ns"], + "topologyKey": "region" + } + }] + }}`, + }, }, - }, - "bad namespace": { - ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: ""}, Spec: api.PodSpec{ + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, RestartPolicy: api.RestartPolicyAlways, DNSPolicy: api.DNSClusterFirst, - Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, - }, - }, - "bad spec": { - ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "ns"}, - Spec: api.PodSpec{ - Containers: []api.Container{{}}, }, }, - "bad label": { + { // Serialized pod anti affinity with different Label Operators in affinity requirements in annotations. ObjectMeta: api.ObjectMeta{ - Name: "abc", + Name: "123", Namespace: "ns", - Labels: map[string]string{ - "NoUppercaseOrSpecialCharsLike=Equals": "bar", + // TODO: Uncomment and move this block into Annotations map once + // RequiredDuringSchedulingRequiredDuringExecution is implemented + // "requiredDuringSchedulingRequiredDuringExecution": [{ + // "labelSelector": { + // "matchExpressions": [{ + // "key": "key2", + // "operator": "In", + // "values": ["value1", "value2"] + // }] + // }, + // "namespaces":["ns"], + // "topologyKey": "zone" + // }] + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"podAntiAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "key2", + "operator": "Exists" + }] + }, + "topologyKey": "zone", + "namespaces": ["ns"] + }], + "preferredDuringSchedulingIgnoredDuringExecution": [{ + "weight": 10, + "podAffinityTerm": { + "labelSelector": { + "matchExpressions": [{ + "key": "key2", + "operator": "DoesNotExist" + }] + }, + "namespaces": ["ns"], + "topologyKey": "region" + } + }] + }}`, }, }, Spec: api.PodSpec{ + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, RestartPolicy: api.RestartPolicyAlways, DNSPolicy: api.DNSClusterFirst, - Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, }, }, - "invalid json of affinity in pod annotations": { + } + for _, pod := range successCases { + if errs := ValidatePod(&pod); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + } + + errorCases := map[string]api.Pod{ + "invalid json of node affinity in pod annotations": { ObjectMeta: api.ObjectMeta{ Name: "123", Namespace: "ns", @@ -2077,7 +2140,7 @@ func TestValidatePod(t *testing.T) { DNSPolicy: 
api.DNSClusterFirst, }, }, - "invalid node selector requirement in affinity in pod annotations, operator can't be null": { + "invalid node selector requirement in node affinity in pod annotations, operator can't be null": { ObjectMeta: api.ObjectMeta{ Name: "123", Namespace: "ns", @@ -2098,7 +2161,7 @@ func TestValidatePod(t *testing.T) { DNSPolicy: api.DNSClusterFirst, }, }, - "invalid preferredSchedulingTerm in affinity in pod annotations, weight should be in range 1-100": { + "invalid preferredSchedulingTerm in node affinity in pod annotations, weight should be in range 1-100": { ObjectMeta: api.ObjectMeta{ Name: "123", Namespace: "ns", @@ -2164,6 +2227,209 @@ func TestValidatePod(t *testing.T) { DNSPolicy: api.DNSClusterFirst, }, }, + "invalid weight in preferredDuringSchedulingIgnoredDuringExecution in pod affinity annotations, weight should be in range 1-100": { + ObjectMeta: api.ObjectMeta{ + Name: "123", + Namespace: "ns", + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"podAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [{ + "weight": 109, + "podAffinityTerm": + { + "labelSelector": { + "matchExpressions": [{ + "key": "key2", + "operator": "NotIn", + "values": ["value1", "value2"] + }] + }, + "namespaces": ["ns"], + "topologyKey": "region" + } + }]}}`, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + }, + "invalid labelSelector in preferredDuringSchedulingIgnoredDuringExecution in podaffinity annotations, values should be empty if the operator is Exists": { + ObjectMeta: api.ObjectMeta{ + Name: "123", + Namespace: "ns", + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"podAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [{ + "weight": 10, + "podAffinityTerm": + { + "labelSelector": { + "matchExpressions": [{ + "key": "key2", + "operator": "Exists", + "values": ["value1", "value2"] + }] + }, + "namespaces": ["ns"], + "topologyKey": "region" + } + }]}}`, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + }, + "invalid name space in preferredDuringSchedulingIgnoredDuringExecution in podaffinity annotations, name space shouldbe valid": { + ObjectMeta: api.ObjectMeta{ + Name: "123", + Namespace: "ns", + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"podAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [{ + "weight": 10, + "podAffinityTerm": + { + "labelSelector": { + "matchExpressions": [{ + "key": "key2", + "operator": "Exists", + "values": ["value1", "value2"] + }] + }, + "namespaces": ["INVALID_NAMESPACE"], + "topologyKey": "region" + } + }]}}`, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + }, + "invalid labelOperator in preferredDuringSchedulingIgnoredDuringExecution in podantiaffinity annotations, labelOperator should be proper": { + ObjectMeta: api.ObjectMeta{ + Name: "123", + Namespace: "ns", + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"podAntiAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [{ + "weight": 10, + "podAffinityTerm": + { + "labelSelector": { + 
"matchExpressions": [{ + "key": "key2", + "operator": "WrongOp", + "values": ["value1", "value2"] + }] + }, + "namespaces": ["ns"], + "topologyKey": "region" + } + }]}}`, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + }, + "invalid pod affinity, empty topologyKey is not allowed for hard pod affinity": { + ObjectMeta: api.ObjectMeta{ + Name: "123", + Namespace: "ns", + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"podAffinity": {"requiredDuringSchedulingIgnoredDuringExecution": [{ + "weight": 10, + "podAffinityTerm": + { + "labelSelector": { + "matchExpressions": [{ + "key": "key2", + "operator": "In", + "values": ["value1", "value2"] + }] + }, + "namespaces": ["ns"], + "topologyKey": "" + } + }]}}`, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + }, + "invalid pod anti-affinity, empty topologyKey is not allowed for hard pod anti-affinity": { + ObjectMeta: api.ObjectMeta{ + Name: "123", + Namespace: "ns", + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"podAntiAffinity": {"requiredDuringSchedulingIgnoredDuringExecution": [{ + "weight": 10, + "podAffinityTerm": + { + "labelSelector": { + "matchExpressions": [{ + "key": "key2", + "operator": "In", + "values": ["value1", "value2"] + }] + }, + "namespaces": ["ns"], + "topologyKey": "" + } + }]}}`, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + }, + "invalid pod anti-affinity, empty topologyKey is not allowed for soft pod affinity": { + ObjectMeta: api.ObjectMeta{ + Name: "123", + Namespace: "ns", + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"podAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [{ + "weight": 10, + "podAffinityTerm": + { + "labelSelector": { + "matchExpressions": [{ + "key": "key2", + "operator": "In", + "values": ["value1", "value2"] + }] + }, + "namespaces": ["ns"], + "topologyKey": "" + } + }]}}`, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}, + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + }, + }, } for k, v := range errorCases { if errs := ValidatePod(&v); len(errs) == 0 { diff --git a/pkg/apis/apps/validation/validation.go b/pkg/apis/apps/validation/validation.go index 3180f28a4b180..ce83ef8c52069 100644 --- a/pkg/apis/apps/validation/validation.go +++ b/pkg/apis/apps/validation/validation.go @@ -56,7 +56,7 @@ func ValidatePodTemplateSpecForPetSet(template *api.PodTemplateSpec, selector la // fail. We should really check that the union of the given volumes and volumeClaims match // volume mounts in the containers. // allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpec(template, fldPath)...) - allErrs = append(allErrs, apivalidation.ValidateLabels(template.Labels, fldPath.Child("labels"))...) + allErrs = append(allErrs, unversionedvalidation.ValidateLabels(template.Labels, fldPath.Child("labels"))...) allErrs = append(allErrs, apivalidation.ValidateAnnotations(template.Annotations, fldPath.Child("annotations"))...) 
allErrs = append(allErrs, apivalidation.ValidatePodSpecificAnnotations(template.Annotations, fldPath.Child("annotations"))...) } diff --git a/pkg/apis/componentconfig/deep_copy_generated.go b/pkg/apis/componentconfig/deep_copy_generated.go index 5a0fdad22ecd4..856c890a204ad 100644 --- a/pkg/apis/componentconfig/deep_copy_generated.go +++ b/pkg/apis/componentconfig/deep_copy_generated.go @@ -189,6 +189,8 @@ func DeepCopy_componentconfig_KubeSchedulerConfiguration(in KubeSchedulerConfigu out.KubeAPIQPS = in.KubeAPIQPS out.KubeAPIBurst = in.KubeAPIBurst out.SchedulerName = in.SchedulerName + out.HardPodAffinitySymmetricWeight = in.HardPodAffinitySymmetricWeight + out.FailureDomains = in.FailureDomains if err := DeepCopy_componentconfig_LeaderElectionConfiguration(in.LeaderElection, &out.LeaderElection, c); err != nil { return err } diff --git a/pkg/apis/componentconfig/types.generated.go b/pkg/apis/componentconfig/types.generated.go index 750ecc70ce9ca..61da2be044d98 100644 --- a/pkg/apis/componentconfig/types.generated.go +++ b/pkg/apis/componentconfig/types.generated.go @@ -5051,16 +5051,16 @@ func (x *KubeSchedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep2 := !z.EncBinary() yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [12]bool + var yyq2 [14]bool _, _, _ = yysep2, yyq2, yy2arr2 const yyr2 bool = false - yyq2[10] = x.Kind != "" - yyq2[11] = x.APIVersion != "" + yyq2[12] = x.Kind != "" + yyq2[13] = x.APIVersion != "" var yynn2 int if yyr2 || yy2arr2 { - r.EncodeArrayStart(12) + r.EncodeArrayStart(14) } else { - yynn2 = 10 + yynn2 = 12 for _, b := range yyq2 { if b { yynn2++ @@ -5242,20 +5242,58 @@ func (x *KubeSchedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy31 := &x.LeaderElection - yy31.CodecEncodeSelf(e) + yym31 := z.EncBinary() + _ = yym31 + if false { + } else { + r.EncodeInt(int64(x.HardPodAffinitySymmetricWeight)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hardPodAffinitySymmetricWeight")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym32 := z.EncBinary() + _ = yym32 + if false { + } else { + r.EncodeInt(int64(x.HardPodAffinitySymmetricWeight)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym34 := z.EncBinary() + _ = yym34 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FailureDomains)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("failureDomains")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym35 := z.EncBinary() + _ = yym35 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FailureDomains)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy37 := &x.LeaderElection + yy37.CodecEncodeSelf(e) } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("leaderElection")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy33 := &x.LeaderElection - yy33.CodecEncodeSelf(e) + yy39 := &x.LeaderElection + yy39.CodecEncodeSelf(e) } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[10] { - yym36 := z.EncBinary() - _ = yym36 + if yyq2[12] { + yym42 := z.EncBinary() + _ = yym42 if false { } else { 
r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -5264,12 +5302,12 @@ func (x *KubeSchedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2[10] { + if yyq2[12] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym37 := z.EncBinary() - _ = yym37 + yym43 := z.EncBinary() + _ = yym43 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -5278,9 +5316,9 @@ func (x *KubeSchedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr2 || yy2arr2 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[11] { - yym39 := z.EncBinary() - _ = yym39 + if yyq2[13] { + yym45 := z.EncBinary() + _ = yym45 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -5289,12 +5327,12 @@ func (x *KubeSchedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq2[11] { + if yyq2[13] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym40 := z.EncBinary() - _ = yym40 + yym46 := z.EncBinary() + _ = yym46 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -5416,12 +5454,24 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromMap(l int, d *codec1978. } else { x.SchedulerName = string(r.DecodeString()) } + case "hardPodAffinitySymmetricWeight": + if r.TryDecodeAsNil() { + x.HardPodAffinitySymmetricWeight = 0 + } else { + x.HardPodAffinitySymmetricWeight = int(r.DecodeInt(codecSelferBitsize1234)) + } + case "failureDomains": + if r.TryDecodeAsNil() { + x.FailureDomains = "" + } else { + x.FailureDomains = string(r.DecodeString()) + } case "leaderElection": if r.TryDecodeAsNil() { x.LeaderElection = LeaderElectionConfiguration{} } else { - yyv13 := &x.LeaderElection - yyv13.CodecDecodeSelf(d) + yyv15 := &x.LeaderElection + yyv15.CodecDecodeSelf(d) } case "kind": if r.TryDecodeAsNil() { @@ -5446,16 +5496,16 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj16 int - var yyb16 bool - var yyhl16 bool = l >= 0 - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb16 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb16 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5465,13 +5515,13 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 } else { x.Port = int32(r.DecodeInt(32)) } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb16 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb16 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5481,13 +5531,13 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 } else { x.Address = string(r.DecodeString()) } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb16 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb16 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5497,13 +5547,13 @@ func 
(x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 } else { x.AlgorithmProvider = string(r.DecodeString()) } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb16 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb16 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5513,13 +5563,13 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 } else { x.PolicyConfigFile = string(r.DecodeString()) } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb16 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb16 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5529,13 +5579,13 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 } else { x.EnableProfiling = bool(r.DecodeBool()) } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb16 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb16 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5545,13 +5595,13 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 } else { x.ContentType = string(r.DecodeString()) } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb16 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb16 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5561,13 +5611,13 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 } else { x.KubeAPIQPS = float32(r.DecodeFloat(true)) } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb16 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb16 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5577,13 +5627,13 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 } else { x.KubeAPIBurst = int32(r.DecodeInt(32)) } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb16 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb16 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5593,13 +5643,45 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 } else { x.SchedulerName = string(r.DecodeString()) } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HardPodAffinitySymmetricWeight = 0 + } else { + x.HardPodAffinitySymmetricWeight = int(r.DecodeInt(codecSelferBitsize1234)) + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FailureDomains = "" + } else { + x.FailureDomains = string(r.DecodeString()) + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb16 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb16 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5607,16 +5689,16 @@ func (x 
*KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 if r.TryDecodeAsNil() { x.LeaderElection = LeaderElectionConfiguration{} } else { - yyv26 := &x.LeaderElection - yyv26.CodecDecodeSelf(d) + yyv30 := &x.LeaderElection + yyv30.CodecDecodeSelf(d) } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb16 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb16 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5626,13 +5708,13 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 } else { x.Kind = string(r.DecodeString()) } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb16 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb16 { + if yyb18 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5643,17 +5725,17 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 x.APIVersion = string(r.DecodeString()) } for { - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l } else { - yyb16 = r.CheckBreak() + yyb18 = r.CheckBreak() } - if yyb16 { + if yyb18 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj16-1, "") + z.DecStructFieldNotFound(yyj18-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } diff --git a/pkg/apis/componentconfig/types.go b/pkg/apis/componentconfig/types.go index 279d95f957433..aff882c571e34 100644 --- a/pkg/apis/componentconfig/types.go +++ b/pkg/apis/componentconfig/types.go @@ -372,6 +372,12 @@ type KubeSchedulerConfiguration struct { // will be processed by this scheduler, based on pod's annotation with // key 'scheduler.alpha.kubernetes.io/name'. SchedulerName string `json:"schedulerName"` + // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule + // corresponding to every RequiredDuringScheduling affinity rule. + // HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 0-100. + HardPodAffinitySymmetricWeight int `json:"hardPodAffinitySymmetricWeight"` + // Indicate the "all topologies" set for empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity. + FailureDomains string `json:"failureDomains"` // leaderElection defines the configuration of leader election client. 
LeaderElection LeaderElectionConfiguration `json:"leaderElection"` } diff --git a/pkg/apis/componentconfig/v1alpha1/conversion_generated.go b/pkg/apis/componentconfig/v1alpha1/conversion_generated.go index f37b1d3d1d1f3..e9b3023263f35 100644 --- a/pkg/apis/componentconfig/v1alpha1/conversion_generated.go +++ b/pkg/apis/componentconfig/v1alpha1/conversion_generated.go @@ -163,6 +163,8 @@ func autoConvert_v1alpha1_KubeSchedulerConfiguration_To_componentconfig_KubeSche out.KubeAPIQPS = in.KubeAPIQPS out.KubeAPIBurst = int32(in.KubeAPIBurst) out.SchedulerName = in.SchedulerName + out.HardPodAffinitySymmetricWeight = in.HardPodAffinitySymmetricWeight + out.FailureDomains = in.FailureDomains if err := Convert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration(&in.LeaderElection, &out.LeaderElection, s); err != nil { return err } @@ -191,6 +193,8 @@ func autoConvert_componentconfig_KubeSchedulerConfiguration_To_v1alpha1_KubeSche out.KubeAPIQPS = in.KubeAPIQPS out.KubeAPIBurst = int(in.KubeAPIBurst) out.SchedulerName = in.SchedulerName + out.HardPodAffinitySymmetricWeight = in.HardPodAffinitySymmetricWeight + out.FailureDomains = in.FailureDomains if err := Convert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(&in.LeaderElection, &out.LeaderElection, s); err != nil { return err } diff --git a/pkg/apis/componentconfig/v1alpha1/deep_copy_generated.go b/pkg/apis/componentconfig/v1alpha1/deep_copy_generated.go index 42541d5c0966a..f4f9fc9322853 100644 --- a/pkg/apis/componentconfig/v1alpha1/deep_copy_generated.go +++ b/pkg/apis/componentconfig/v1alpha1/deep_copy_generated.go @@ -98,6 +98,8 @@ func DeepCopy_v1alpha1_KubeSchedulerConfiguration(in KubeSchedulerConfiguration, out.KubeAPIQPS = in.KubeAPIQPS out.KubeAPIBurst = in.KubeAPIBurst out.SchedulerName = in.SchedulerName + out.HardPodAffinitySymmetricWeight = in.HardPodAffinitySymmetricWeight + out.FailureDomains = in.FailureDomains if err := DeepCopy_v1alpha1_LeaderElectionConfiguration(in.LeaderElection, &out.LeaderElection, c); err != nil { return err } diff --git a/pkg/apis/componentconfig/v1alpha1/defaults.go b/pkg/apis/componentconfig/v1alpha1/defaults.go index c52a34959661d..a199b04ac9a15 100644 --- a/pkg/apis/componentconfig/v1alpha1/defaults.go +++ b/pkg/apis/componentconfig/v1alpha1/defaults.go @@ -89,6 +89,12 @@ func SetDefaults_KubeSchedulerConfiguration(obj *KubeSchedulerConfiguration) { if obj.SchedulerName == "" { obj.SchedulerName = api.DefaultSchedulerName } + if obj.HardPodAffinitySymmetricWeight == 0 { + obj.HardPodAffinitySymmetricWeight = api.DefaultHardPodAffinitySymmetricWeight + } + if obj.FailureDomains == "" { + obj.FailureDomains = api.DefaultFailureDomains + } } func SetDefaults_LeaderElectionConfiguration(obj *LeaderElectionConfiguration) { diff --git a/pkg/apis/componentconfig/v1alpha1/types.go b/pkg/apis/componentconfig/v1alpha1/types.go index abffc15d42f83..2ae65d87da1d9 100644 --- a/pkg/apis/componentconfig/v1alpha1/types.go +++ b/pkg/apis/componentconfig/v1alpha1/types.go @@ -105,6 +105,12 @@ type KubeSchedulerConfiguration struct { // will be processed by this scheduler, based on pod's annotation with // key 'scheduler.alpha.kubernetes.io/name'. SchedulerName string `json:"schedulerName"` + // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule + // corresponding to every RequiredDuringScheduling affinity rule. 
+ // HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 0-100. + HardPodAffinitySymmetricWeight int `json:"hardPodAffinitySymmetricWeight"` + // Indicate the "all topologies" set for empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity. + FailureDomains string `json:"failureDomains"` // leaderElection defines the configuration of leader election client. LeaderElection LeaderElectionConfiguration `json:"leaderElection"` } diff --git a/plugin/cmd/kube-scheduler/app/options/options.go b/plugin/cmd/kube-scheduler/app/options/options.go index 89f9d79b6acc2..542bf85757a78 100644 --- a/plugin/cmd/kube-scheduler/app/options/options.go +++ b/plugin/cmd/kube-scheduler/app/options/options.go @@ -67,5 +67,9 @@ func (s *SchedulerServer) AddFlags(fs *pflag.FlagSet) { fs.Float32Var(&s.KubeAPIQPS, "kube-api-qps", s.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver") fs.Int32Var(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver") fs.StringVar(&s.SchedulerName, "scheduler-name", s.SchedulerName, "Name of the scheduler, used to select which pods will be processed by this scheduler, based on pod's annotation with key 'scheduler.alpha.kubernetes.io/name'") + fs.IntVar(&s.HardPodAffinitySymmetricWeight, "hard-pod-affinity-symmetric-weight", api.DefaultHardPodAffinitySymmetricWeight, + "RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule corresponding "+ + "to every RequiredDuringScheduling affinity rule. --hard-pod-affinity-symmetric-weight represents the weight of implicit PreferredDuringScheduling affinity rule.") + fs.StringVar(&s.FailureDomains, "failure-domains", api.DefaultFailureDomains, "Indicate the \"all topologies\" set for an empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity.") leaderelection.BindFlags(&s.LeaderElection, fs) } diff --git a/plugin/cmd/kube-scheduler/app/server.go b/plugin/cmd/kube-scheduler/app/server.go index c4f788397e37c..16d5e87eb67f0 100644 --- a/plugin/cmd/kube-scheduler/app/server.go +++ b/plugin/cmd/kube-scheduler/app/server.go @@ -107,7 +107,7 @@ func Run(s *options.SchedulerServer) error { glog.Fatal(server.ListenAndServe()) }() - configFactory := factory.NewConfigFactory(kubeClient, s.SchedulerName) + configFactory := factory.NewConfigFactory(kubeClient, s.SchedulerName, s.HardPodAffinitySymmetricWeight, s.FailureDomains) config, err := createConfig(s, configFactory) if err != nil { diff --git a/plugin/pkg/scheduler/algorithm/predicates/error.go b/plugin/pkg/scheduler/algorithm/predicates/error.go index 9fae8a8a16633..466590d4602c9 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/error.go +++ b/plugin/pkg/scheduler/algorithm/predicates/error.go @@ -30,6 +30,7 @@ var ( ErrDiskConflict = newPredicateFailureError("NoDiskConflict") ErrVolumeZoneConflict = newPredicateFailureError("NoVolumeZoneConflict") ErrNodeSelectorNotMatch = newPredicateFailureError("MatchNodeSelector") + ErrPodAffinityNotMatch = newPredicateFailureError("MatchInterPodAffinity") ErrPodNotMatchHostName = newPredicateFailureError("HostName") ErrPodNotFitsHostPorts = newPredicateFailureError("PodFitsHostPorts") ErrNodeLabelPresenceViolated = newPredicateFailureError("CheckNodeLabelPresence") diff --git a/plugin/pkg/scheduler/algorithm/predicates/predicates.go b/plugin/pkg/scheduler/algorithm/predicates/predicates.go index 
229761bfc012d..a019a01c2e273 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/predicates.go +++ b/plugin/pkg/scheduler/algorithm/predicates/predicates.go @@ -19,14 +19,14 @@ package predicates import ( "fmt" + "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" + priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" - - "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api/unversioned" ) type NodeInfo interface { @@ -723,3 +723,198 @@ func GeneralPredicates(pod *api.Pod, nodeName string, nodeInfo *schedulercache.N } return true, nil } + +type PodAffinityChecker struct { + info NodeInfo + podLister algorithm.PodLister + failureDomains priorityutil.Topologies +} + +func NewPodAffinityPredicate(info NodeInfo, podLister algorithm.PodLister, failureDomains []string) algorithm.FitPredicate { + checker := &PodAffinityChecker{ + info: info, + podLister: podLister, + failureDomains: priorityutil.Topologies{DefaultKeys: failureDomains}, + } + return checker.InterPodAffinityMatches +} + +func (checker *PodAffinityChecker) InterPodAffinityMatches(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) { + node, err := checker.info.GetNodeInfo(nodeName) + if err != nil { + return false, err + } + allPods, err := checker.podLister.List(labels.Everything()) + if err != nil { + return false, err + } + if checker.NodeMatchPodAffinityAntiAffinity(pod, allPods, node) { + return true, nil + } + return false, ErrPodAffinityNotMatch +} + +// AnyPodMatchesPodAffinityTerm checks if any of given pods can match the specific podAffinityTerm. +func (checker *PodAffinityChecker) AnyPodMatchesPodAffinityTerm(pod *api.Pod, allPods []*api.Pod, node *api.Node, podAffinityTerm api.PodAffinityTerm) (bool, error) { + for _, ep := range allPods { + match, err := checker.failureDomains.CheckIfPodMatchPodAffinityTerm(ep, pod, podAffinityTerm, + func(ep *api.Pod) (*api.Node, error) { return checker.info.GetNodeInfo(ep.Spec.NodeName) }, + func(pod *api.Pod) (*api.Node, error) { return node, nil }, + ) + if err != nil || match { + return match, err + } + } + return false, nil +} + +// Checks whether the given node has pods which satisfy all the required pod affinity scheduling rules. +// If node has pods which satisfy all the required pod affinity scheduling rules then return true. +func (checker *PodAffinityChecker) NodeMatchesHardPodAffinity(pod *api.Pod, allPods []*api.Pod, node *api.Node, podAffinity *api.PodAffinity) bool { + var podAffinityTerms []api.PodAffinityTerm + if len(podAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 { + podAffinityTerms = podAffinity.RequiredDuringSchedulingIgnoredDuringExecution + } + // TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution. + //if len(podAffinity.RequiredDuringSchedulingRequiredDuringExecution) != 0 { + // podAffinityTerms = append(podAffinityTerms, podAffinity.RequiredDuringSchedulingRequiredDuringExecution...) 
+ //} + + for _, podAffinityTerm := range podAffinityTerms { + podAffinityTermMatches, err := checker.AnyPodMatchesPodAffinityTerm(pod, allPods, node, podAffinityTerm) + if err != nil { + glog.V(10).Infof("Cannot schedule pod %+v onto node %v, an error ocurred when checking existing pods on the node for PodAffinityTerm %v err: %v", + podName(pod), node.Name, podAffinityTerm, err) + return false + } + + if !podAffinityTermMatches { + // TODO: Think about whether this can be simplified once we have controllerRef + // Check if it is in special case that the requiredDuringScheduling affinity requirement can be disregarded. + // If the requiredDuringScheduling affinity requirement matches a pod's own labels and namespace, and there are no other such pods + // anywhere, then disregard the requirement. + // This allows rules like "schedule all of the pods of this collection to the same zone" to not block forever + // because the first pod of the collection can't be scheduled. + names := priorityutil.GetNamespacesFromPodAffinityTerm(pod, podAffinityTerm) + labelSelector, err := unversioned.LabelSelectorAsSelector(podAffinityTerm.LabelSelector) + if err != nil || !names.Has(pod.Namespace) || !labelSelector.Matches(labels.Set(pod.Labels)) { + glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because none of the existing pods on this node satisfy the PodAffinityTerm %v, err: %+v", + podName(pod), node.Name, podAffinityTerm, err) + return false + } + + // the affinity is to put the pod together with other pods from its same service or controller + filteredPods := priorityutil.FilterPodsByNameSpaces(names, allPods) + for _, filteredPod := range filteredPods { + // if found an existing pod from same service or RC, + // the affinity scheduling rules cannot be disregarded. + if labelSelector.Matches(labels.Set(filteredPod.Labels)) { + glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because none of the existing pods on this node satisfy the PodAffinityTerm %v", + podName(pod), node.Name, podAffinityTerm) + return false + } + } + } + } + // all the required pod affinity scheduling rules satisfied + glog.V(10).Infof("All the required pod affinity scheduling rules are satisfied for Pod %+v, on node %v", podName(pod), node.Name) + return true +} + +// Checks whether the given node has pods which satisfy all the +// required pod anti-affinity scheduling rules. +// Also checks whether putting the pod onto the node would break +// any anti-affinity scheduling rules indicated by existing pods. +// If node has pods which satisfy all the required pod anti-affinity +// scheduling rules and scheduling the pod onto the node won't +// break any existing pods' anti-affinity rules, then return true. +func (checker *PodAffinityChecker) NodeMatchesHardPodAntiAffinity(pod *api.Pod, allPods []*api.Pod, node *api.Node, podAntiAffinity *api.PodAntiAffinity) bool { + var podAntiAffinityTerms []api.PodAffinityTerm + if len(podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 { + podAntiAffinityTerms = podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution + } + // TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution. + //if len(podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution) != 0 { + // podAntiAffinityTerms = append(podAntiAffinityTerms, podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution...) 
+ //} + + // foreach element podAntiAffinityTerm of podAntiAffinityTerms + // if the pod matches the term (breaks the anti-affinity), + // don't schedule the pod onto this node. + for _, podAntiAffinityTerm := range podAntiAffinityTerms { + podAntiAffinityTermMatches, err := checker.AnyPodMatchesPodAffinityTerm(pod, allPods, node, podAntiAffinityTerm) + if err != nil || podAntiAffinityTermMatches == true { + glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because not all the existing pods on this node satisfy the PodAntiAffinityTerm %v, err: %v", + podName(pod), node.Name, podAntiAffinityTerm, err) + return false + } + } + + // Check if scheduling the pod onto this node would break + // any anti-affinity rules indicated by the existing pods on the node. + // If it would break, system should not schedule pod onto this node. + for _, ep := range allPods { + epAffinity, err := api.GetAffinityFromPodAnnotations(ep.Annotations) + if err != nil { + glog.V(10).Infof("Failed to get Affinity from Pod %+v, err: %+v", podName(pod), err) + return false + } + if epAffinity.PodAntiAffinity != nil { + var epAntiAffinityTerms []api.PodAffinityTerm + if len(epAffinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 { + epAntiAffinityTerms = epAffinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution + } + // TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution. + //if len(epAffinity.PodAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution) != 0 { + // epAntiAffinityTerms = append(epAntiAffinityTerms, epAffinity.PodAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution...) + //} + + for _, epAntiAffinityTerm := range epAntiAffinityTerms { + labelSelector, err := unversioned.LabelSelectorAsSelector(epAntiAffinityTerm.LabelSelector) + if err != nil { + glog.V(10).Infof("Failed to get label selector from anti-affinityterm %+v of existing pod %+v, err: %+v", epAntiAffinityTerm, podName(pod), err) + return false + } + + names := priorityutil.GetNamespacesFromPodAffinityTerm(ep, epAntiAffinityTerm) + if (len(names) == 0 || names.Has(pod.Namespace)) && labelSelector.Matches(labels.Set(pod.Labels)) { + epNode, err := checker.info.GetNodeInfo(ep.Spec.NodeName) + if err != nil || checker.failureDomains.NodesHaveSameTopologyKey(node, epNode, epAntiAffinityTerm.TopologyKey) { + glog.V(10).Infof("Cannot schedule Pod %+v, onto node %v because the pod would break the PodAntiAffinityTerm %+v, of existing pod %+v, err: %v", + podName(pod), node.Name, epAntiAffinityTerm, podName(ep), err) + return false + } + } + } + } + } + // all the required pod anti-affinity scheduling rules are satisfied + glog.V(10).Infof("Can schedule Pod %+v, on node %v because all the required pod anti-affinity scheduling rules are satisfied", podName(pod), node.Name) + return true +} + +// NodeMatchPodAffinityAntiAffinity checks if the node matches +// the requiredDuringScheduling affinity/anti-affinity rules indicated by the pod. +func (checker *PodAffinityChecker) NodeMatchPodAffinityAntiAffinity(pod *api.Pod, allPods []*api.Pod, node *api.Node) bool { + // Parse required affinity scheduling rules. + affinity, err := api.GetAffinityFromPodAnnotations(pod.Annotations) + if err != nil { + glog.V(10).Infof("Failed to get Affinity from Pod %+v, err: %+v", podName(pod), err) + return false + } + + // check if the current node match the inter-pod affinity scheduling rules. 
+ if affinity.PodAffinity != nil { + if !checker.NodeMatchesHardPodAffinity(pod, allPods, node, affinity.PodAffinity) { + return false + } + } + + // check if the current node match the inter-pod anti-affinity scheduling rules. + if affinity.PodAntiAffinity != nil { + if !checker.NodeMatchesHardPodAntiAffinity(pod, allPods, node, affinity.PodAntiAffinity) { + return false + } + } + return true +} diff --git a/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go b/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go index 73f71dc3d78b6..07ee7108d2056 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go +++ b/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go @@ -20,12 +20,14 @@ import ( "fmt" "os/exec" "reflect" + "strings" "testing" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/util/codeinspector" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" + priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) @@ -1564,3 +1566,673 @@ func TestRunGeneralPredicates(t *testing.T) { } } } + +func TestInterPodAffinity(t *testing.T) { + podLabel := map[string]string{"service": "securityscan"} + labels1 := map[string]string{ + "region": "r1", + "zone": "z11", + } + podLabel2 := map[string]string{"security": "S1"} + node1 := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labels1}} + tests := []struct { + pod *api.Pod + pods []*api.Pod + node api.Node + fits bool + test string + }{ + { + pod: new(api.Pod), + node: node1, + fits: true, + test: "A pod that has no required pod affinity scheduling rules can schedule onto a node with no existing pods", + }, + { + pod: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Labels: podLabel2, + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "In", + "values": ["securityscan", "value2"] + }] + }, + "topologyKey": "region" + }] + }}`, + }, + }, + }, + pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel}}}, + node: node1, + fits: true, + test: "satisfies with requiredDuringSchedulingIgnoredDuringExecution in PodAffinity using In operator that matches the existing pod", + }, + { + pod: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Labels: podLabel2, + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "NotIn", + "values": ["securityscan3", "value3"] + }] + }, + "topologyKey": "region" + }] + }}`, + }, + }, + }, + pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel}}}, + node: node1, + fits: true, + test: "satisfies the pod with requiredDuringSchedulingIgnoredDuringExecution in PodAffinity using not in operator in labelSelector that matches the existing pod", + }, + { + pod: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Labels: podLabel2, + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "In", + "values": ["securityscan", "value2"] + }] + }, + "namespaces":["DiffNameSpace"] + }] + }}`, + }, + }, + }, + 
pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel, Namespace: "ns"}}}, + node: node1, + fits: false, + test: "Does not satisfy the PodAffinity with labelSelector because of diff Namespace", + }, + { + pod: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Labels: podLabel, + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "In", + "values": ["antivirusscan", "value2"] + }] + } + }] + }}`, + }, + }, + }, + pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel}}}, + node: node1, + fits: false, + test: "Doesn't satisfy the PodAffinity because of unmatching labelSelector with the existing pod", + }, + { + pod: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Labels: podLabel2, + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [ + { + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "Exists" + }, { + "key": "wrongkey", + "operator": "DoesNotExist" + }] + }, + "topologyKey": "region" + }, { + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "In", + "values": ["securityscan"] + }, { + "key": "service", + "operator": "NotIn", + "values": ["WrongValue"] + }] + }, + "topologyKey": "region" + } + ] + }}`, + }, + }, + }, + pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel}}}, + node: node1, + fits: true, + test: "satisfies the PodAffinity with different label Operators in multiple RequiredDuringSchedulingIgnoredDuringExecution ", + }, + { + pod: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Labels: podLabel2, + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [ + { + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "Exists" + }, { + "key": "wrongkey", + "operator": "DoesNotExist" + }] + }, + "topologyKey": "region" + }, { + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "In", + "values": ["securityscan2"] + }, { + "key": "service", + "operator": "NotIn", + "values": ["WrongValue"] + }] + }, + "topologyKey": "region" + } + ] + }}`, + }, + }, + }, + pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel}}}, + node: node1, + fits: false, + test: "The labelSelector requirements(items of matchExpressions) are ANDed, the pod cannot schedule onto the node becasue one of the matchExpression item don't match.", + }, + { + pod: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Labels: podLabel2, + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "In", + "values": ["securityscan", "value2"] + }] + }, + "topologyKey": "region" + }] + }, + "podAntiAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "In", + "values": ["antivirusscan", "value2"] + }] + }, + "topologyKey": "node" + }] + }}`, + }, + }, + }, + pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel}}}, + node: node1, + 
fits: true, + test: "satisfies the PodAffinity and PodAntiAffinity with the existing pod", + }, + // TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution. + //{ + // pod: &api.Pod{ + // ObjectMeta: api.ObjectMeta{ + // Labels: podLabel2, + // Annotations: map[string]string{ + // api.AffinityAnnotationKey: ` + // {"podAffinity": { + // "requiredDuringSchedulingRequiredDuringExecution": [ + // { + // "labelSelector": { + // "matchExpressions": [{ + // "key": "service", + // "operator": "Exists" + // }, { + // "key": "wrongkey", + // "operator": "DoesNotExist" + // }] + // }, + // "topologyKey": "region" + // }, { + // "labelSelector": { + // "matchExpressions": [{ + // "key": "service", + // "operator": "In", + // "values": ["securityscan"] + // }, { + // "key": "service", + // "operator": "NotIn", + // "values": ["WrongValue"] + // }] + // }, + // "topologyKey": "region" + // } + // ] + // }}`, + // }, + // }, + // }, + // pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podlabel}}}, + // node: node1, + // fits: true, + // test: "satisfies the PodAffinity with different Label Operators in multiple RequiredDuringSchedulingRequiredDuringExecution ", + //}, + { + pod: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Labels: podLabel2, + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "In", + "values": ["securityscan", "value2"] + }] + }, + "topologyKey": "region" + }] + }, + "podAntiAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "In", + "values": ["antivirusscan", "value2"] + }] + }, + "topologyKey": "node" + }] + }}`, + }, + }, + }, + pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, + ObjectMeta: api.ObjectMeta{Labels: podLabel, + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"PodAntiAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "In", + "values": ["antivirusscan", "value2"] + }] + }, + "topologyKey": "node" + }] + }}`, + }}, + }}, + node: node1, + fits: true, + test: "satisfies the PodAffinity and PodAntiAffinity and PodAntiAffinity symmetry with the existing pod", + }, + { + pod: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Labels: podLabel2, + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "In", + "values": ["securityscan", "value2"] + }] + }, + "topologyKey": "region" + }] + }, + "podAntiAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "In", + "values": ["securityscan", "value2"] + }] + }, + "topologyKey": "zone" + }] + }}`, + }, + }, + }, + pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel}}}, + node: node1, + fits: false, + test: "satisfies the PodAffinity but doesn't satisfies the PodAntiAffinity with the existing pod", + }, + { + pod: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Labels: podLabel, + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"podAffinity": { + 
"requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "In", + "values": ["securityscan", "value2"] + }] + }, + "topologyKey": "region" + }] + }, + "podAntiAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "In", + "values": ["antivirusscan", "value2"] + }] + }, + "topologyKey": "node" + }] + }}`, + }, + }, + }, + pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, + ObjectMeta: api.ObjectMeta{Labels: podLabel, + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"PodAntiAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "In", + "values": ["securityscan", "value2"] + }] + }, + "topologyKey": "zone" + }] + }}`, + }}, + }}, + node: node1, + fits: false, + test: "satisfies the PodAffinity and PodAntiAffinity but doesn't satisfies PodAntiAffinity symmetry with the existing pod", + }, + { + pod: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Labels: podLabel, + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "NotIn", + "values": ["securityscan", "value2"] + }] + }, + "topologyKey": "region" + }] + }}`, + }, + }, + }, + pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabel}}}, + node: node1, + fits: false, + test: "pod matches its own Label in PodAffinity and that matches the existing pod Labels", + }, + } + for _, test := range tests { + node := test.node + var podsOnNode []*api.Pod + for _, pod := range test.pods { + if pod.Spec.NodeName == node.Name { + podsOnNode = append(podsOnNode, pod) + } + } + + fit := PodAffinityChecker{ + info: FakeNodeInfo(node), + podLister: algorithm.FakePodLister(test.pods), + failureDomains: priorityutil.Topologies{DefaultKeys: strings.Split(api.DefaultFailureDomains, ",")}, + } + fits, err := fit.InterPodAffinityMatches(test.pod, test.node.Name, schedulercache.NewNodeInfo(podsOnNode...)) + if !reflect.DeepEqual(err, ErrPodAffinityNotMatch) && err != nil { + t.Errorf("%s: unexpected error %v", test.test, err) + } + if fits != test.fits { + t.Errorf("%s: expected %v got %v", test.test, test.fits, fits) + } + } +} + +func TestInterPodAffinityWithMultipleNodes(t *testing.T) { + podLabelA := map[string]string{ + "foo": "bar", + } + labelRgChina := map[string]string{ + "region": "China", + } + labelRgChinaAzAz1 := map[string]string{ + "region": "China", + "az": "az1", + } + labelRgIndia := map[string]string{ + "region": "India", + } + tests := []struct { + pod *api.Pod + pods []*api.Pod + nodes []api.Node + fits map[string]bool + test string + }{ + { + pod: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "foo", + "operator": "In", + "values": ["bar"] + }] + }, + "topologyKey": "region" + }] + }}`, + }, + }, + }, + pods: []*api.Pod{ + {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelA}}, + }, + nodes: []api.Node{ + {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, + {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: 
labelRgChinaAzAz1}}, + {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelRgIndia}}, + }, + fits: map[string]bool{ + "machine1": true, + "machine2": true, + "machine3": false, + }, + test: "A pod can be scheduled onto all the nodes that have the same topology key & label value with one of them has an existing pod that match the affinity rules", + }, + { + pod: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + { + "nodeAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": { + "nodeSelectorTerms": [{ + "matchExpressions": [{ + "key": "hostname", + "operator": "NotIn", + "values": ["h1"] + }] + }] + } + }, + "podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "foo", + "operator": "In", + "values": ["abc"] + }] + }, + "topologyKey": "region" + }] + } + }`, + }, + }, + }, + pods: []*api.Pod{ + {Spec: api.PodSpec{NodeName: "nodeA"}, ObjectMeta: api.ObjectMeta{Labels: map[string]string{"foo": "abc"}}}, + {Spec: api.PodSpec{NodeName: "nodeB"}, ObjectMeta: api.ObjectMeta{Labels: map[string]string{"foo": "def"}}}, + }, + nodes: []api.Node{ + {ObjectMeta: api.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "hostname": "h1"}}}, + {ObjectMeta: api.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "hostname": "h2"}}}, + }, + fits: map[string]bool{ + "nodeA": false, + "nodeB": true, + }, + test: "NodeA and nodeB have same topologyKey and label value. NodeA does not satisfy node affinity rule, but has an existing pod that match the inter pod affinity rule. The pod can be scheduled onto nodeB.", + }, + { + pod: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{ + "foo": "bar", + }, + Annotations: map[string]string{ + api.AffinityAnnotationKey: ` + {"podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "foo", + "operator": "In", + "values": ["bar"] + }] + }, + "topologyKey": "zone" + }] + }}`, + }, + }, + }, + pods: []*api.Pod{}, + nodes: []api.Node{ + {ObjectMeta: api.ObjectMeta{Name: "nodeA", Labels: map[string]string{"zone": "az1", "hostname": "h1"}}}, + {ObjectMeta: api.ObjectMeta{Name: "nodeB", Labels: map[string]string{"zone": "az2", "hostname": "h2"}}}, + }, + fits: map[string]bool{ + "nodeA": true, + "nodeB": true, + }, + test: "The affinity rule is to schedule all of the pods of this collection to the same zone. 
The first pod of the collection " + + "should not be blocked from being scheduled onto any node, even there's no existing pod that match the rule anywhere.", + }, + } + for _, test := range tests { + nodeListInfo := FakeNodeListInfo(test.nodes) + for _, node := range test.nodes { + var podsOnNode []*api.Pod + for _, pod := range test.pods { + if pod.Spec.NodeName == node.Name { + podsOnNode = append(podsOnNode, pod) + } + } + + testFit := PodAffinityChecker{ + info: nodeListInfo, + podLister: algorithm.FakePodLister(test.pods), + failureDomains: priorityutil.Topologies{DefaultKeys: strings.Split(api.DefaultFailureDomains, ",")}, + } + fits, err := testFit.InterPodAffinityMatches(test.pod, node.Name, schedulercache.NewNodeInfo(podsOnNode...)) + if !reflect.DeepEqual(err, ErrPodAffinityNotMatch) && err != nil { + t.Errorf("%s: unexpected error %v", test.test, err) + } + affinity, err := api.GetAffinityFromPodAnnotations(test.pod.ObjectMeta.Annotations) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if affinity.NodeAffinity != nil { + nodeInfo := schedulercache.NewNodeInfo() + nodeInfo.SetNode(&node) + fits2, err := PodSelectorMatches(test.pod, node.Name, nodeInfo) + if !reflect.DeepEqual(err, ErrNodeSelectorNotMatch) && err != nil { + t.Errorf("unexpected error: %v", err) + } + fits = fits && fits2 + } + + if fits != test.fits[node.Name] { + t.Errorf("%s: expected %v for %s got %v", test.test, test.fits[node.Name], node.Name, fits) + } + } + } +} diff --git a/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity.go b/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity.go new file mode 100644 index 0000000000000..c1a84862a0193 --- /dev/null +++ b/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity.go @@ -0,0 +1,216 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package priorities + +import ( + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" + "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates" + priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util" + schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" + "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" +) + +type InterPodAffinity struct { + info predicates.NodeInfo + nodeLister algorithm.NodeLister + podLister algorithm.PodLister + hardPodAffinityWeight int + failureDomains priorityutil.Topologies +} + +func NewInterPodAffinityPriority(info predicates.NodeInfo, nodeLister algorithm.NodeLister, podLister algorithm.PodLister, hardPodAffinityWeight int, failureDomains []string) algorithm.PriorityFunction { + interPodAffinity := &InterPodAffinity{ + info: info, + nodeLister: nodeLister, + podLister: podLister, + hardPodAffinityWeight: hardPodAffinityWeight, + failureDomains: priorityutil.Topologies{DefaultKeys: failureDomains}, + } + return interPodAffinity.CalculateInterPodAffinityPriority +} + +// countPodsThatMatchPodAffinityTerm counts the number of given pods that match the podAffinityTerm. +func (ipa *InterPodAffinity) CountPodsThatMatchPodAffinityTerm(pod *api.Pod, podsForMatching []*api.Pod, node *api.Node, podAffinityTerm api.PodAffinityTerm) (int, error) { + matchedCount := 0 + for _, ep := range podsForMatching { + match, err := ipa.failureDomains.CheckIfPodMatchPodAffinityTerm(ep, pod, podAffinityTerm, + func(ep *api.Pod) (*api.Node, error) { + return ipa.info.GetNodeInfo(ep.Spec.NodeName) + }, + func(pod *api.Pod) (*api.Node, error) { + return node, nil + }, + ) + if err != nil { + return 0, err + } + if match { + matchedCount++ + } + } + return matchedCount, nil +} + +// CountWeightByPodMatchAffinityTerm counts the weight to topologyCounts for all the given pods that match the podAffinityTerm. +func (ipa *InterPodAffinity) CountWeightByPodMatchAffinityTerm(pod *api.Pod, podsForMatching []*api.Pod, weight int, podAffinityTerm api.PodAffinityTerm, node *api.Node) (int, error) { + if weight == 0 { + return 0, nil + } + // get the pods which are there in that particular node + podsMatchedCount, err := ipa.CountPodsThatMatchPodAffinityTerm(pod, podsForMatching, node, podAffinityTerm) + return weight * podsMatchedCount, err +} + +// compute a sum by iterating through the elements of weightedPodAffinityTerm and adding +// "weight" to the sum if the corresponding PodAffinityTerm is satisfied for +// that node; the node(s) with the highest sum are the most preferred. 
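+// The raw per-node sums are then normalized to the 0-10 host priority range:
+//   score(node) = 10 * (sum(node) - minSum) / (maxSum - minSum)
+// so the node(s) with the largest sum score 10, and a node scores 0 only when it has the
+// smallest sum (or when all sums are equal).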
+// Symmetry need to be considered for preferredDuringSchedulingIgnoredDuringExecution from podAffinity & podAntiAffinity, +// symmetry need to be considered for hard requirements from podAffinity +func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodeLister algorithm.NodeLister) (schedulerapi.HostPriorityList, error) { + nodes, err := nodeLister.List() + if err != nil { + return nil, err + } + allPods, err := ipa.podLister.List(labels.Everything()) + if err != nil { + return nil, err + } + affinity, err := api.GetAffinityFromPodAnnotations(pod.Annotations) + if err != nil { + return nil, err + } + + // convert the topology key based weights to the node name based weights + var maxCount int + var minCount int + counts := map[string]int{} + for _, node := range nodes.Items { + totalCount := 0 + // count weights for the weighted pod affinity + if affinity.PodAffinity != nil { + for _, weightedTerm := range affinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution { + weightedCount, err := ipa.CountWeightByPodMatchAffinityTerm(pod, allPods, weightedTerm.Weight, weightedTerm.PodAffinityTerm, &node) + if err != nil { + return nil, err + } + totalCount += weightedCount + } + } + + // count weights for the weighted pod anti-affinity + if affinity.PodAntiAffinity != nil { + for _, weightedTerm := range affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution { + weightedCount, err := ipa.CountWeightByPodMatchAffinityTerm(pod, allPods, (0 - weightedTerm.Weight), weightedTerm.PodAffinityTerm, &node) + if err != nil { + return nil, err + } + totalCount += weightedCount + } + } + + // reverse direction checking: count weights for the inter-pod affinity/anti-affinity rules + // that are indicated by existing pods on the node. + for _, ep := range allPods { + epAffinity, err := api.GetAffinityFromPodAnnotations(ep.Annotations) + if err != nil { + return nil, err + } + + if epAffinity.PodAffinity != nil { + // count the implicit weight for the hard pod affinity indicated by the existing pod. + if ipa.hardPodAffinityWeight > 0 { + var podAffinityTerms []api.PodAffinityTerm + if len(epAffinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 { + podAffinityTerms = epAffinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution + } + // TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution. + //if len(affinity.PodAffinity.RequiredDuringSchedulingRequiredDuringExecution) != 0 { + // podAffinityTerms = append(podAffinityTerms, affinity.PodAffinity.RequiredDuringSchedulingRequiredDuringExecution...) + //} + for _, epAffinityTerm := range podAffinityTerms { + match, err := ipa.failureDomains.CheckIfPodMatchPodAffinityTerm(pod, ep, epAffinityTerm, + func(pod *api.Pod) (*api.Node, error) { return &node, nil }, + func(ep *api.Pod) (*api.Node, error) { return ipa.info.GetNodeInfo(ep.Spec.NodeName) }, + ) + if err != nil { + return nil, err + } + if match { + totalCount += ipa.hardPodAffinityWeight + } + } + } + + // count weight for the weighted pod affinity indicated by the existing pod. 
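+					// (symmetric counterpart of the forward check above: the existing pod's own preferred
+					// affinity terms are evaluated against the incoming pod, and every match adds that
+					// term's weight to this node's sum)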
+ for _, epWeightedTerm := range epAffinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution { + match, err := ipa.failureDomains.CheckIfPodMatchPodAffinityTerm(pod, ep, epWeightedTerm.PodAffinityTerm, + func(pod *api.Pod) (*api.Node, error) { return &node, nil }, + func(ep *api.Pod) (*api.Node, error) { return ipa.info.GetNodeInfo(ep.Spec.NodeName) }, + ) + if err != nil { + return nil, err + } + if match { + totalCount += epWeightedTerm.Weight + } + } + } + + // count weight for the weighted pod anti-affinity indicated by the existing pod. + if epAffinity.PodAntiAffinity != nil { + for _, epWeightedTerm := range epAffinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution { + match, err := ipa.failureDomains.CheckIfPodMatchPodAffinityTerm(pod, ep, epWeightedTerm.PodAffinityTerm, + func(pod *api.Pod) (*api.Node, error) { return &node, nil }, + func(ep *api.Pod) (*api.Node, error) { return ipa.info.GetNodeInfo(ep.Spec.NodeName) }, + ) + if err != nil { + return nil, err + } + if match { + totalCount -= epWeightedTerm.Weight + } + } + } + } + + counts[node.Name] = totalCount + if counts[node.Name] > maxCount { + maxCount = counts[node.Name] + } + if counts[node.Name] < minCount { + minCount = counts[node.Name] + } + } + + // calculate final priority score for each node + result := []schedulerapi.HostPriority{} + for _, node := range nodes.Items { + fScore := float64(0) + if (maxCount - minCount) > 0 { + fScore = 10 * (float64(counts[node.Name]-minCount) / float64(maxCount-minCount)) + } + result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int(fScore)}) + glog.V(10).Infof( + "%v -> %v: InterPodAffinityPriority, Score: (%d)", pod.Name, node.Name, int(fScore), + ) + } + + return result, nil +} diff --git a/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go b/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go new file mode 100644 index 0000000000000..c130f1e75239a --- /dev/null +++ b/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go @@ -0,0 +1,688 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package priorities + +import ( + "fmt" + "reflect" + "strings" + "testing" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" + priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util" + schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" + "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" +) + +type FakeNodeListInfo []api.Node + +func (nodes FakeNodeListInfo) GetNodeInfo(nodeName string) (*api.Node, error) { + for _, node := range nodes { + if node.Name == nodeName { + return &node, nil + } + } + return nil, fmt.Errorf("Unable to find node: %s", nodeName) +} + +func TestInterPodAffinityPriority(t *testing.T) { + labelRgChina := map[string]string{ + "region": "China", + } + labelRgIndia := map[string]string{ + "region": "India", + } + labelAzAz1 := map[string]string{ + "az": "az1", + } + labelAzAz2 := map[string]string{ + "az": "az2", + } + labelRgChinaAzAz1 := map[string]string{ + "region": "China", + "az": "az1", + } + podLabelSecurityS1 := map[string]string{ + "security": "S1", + } + podLabelSecurityS2 := map[string]string{ + "security": "S2", + } + // considered only preferredDuringSchedulingIgnoredDuringExecution in pod affinity + stayWithS1InRegion := map[string]string{ + api.AffinityAnnotationKey: ` + {"podAffinity": { + "preferredDuringSchedulingIgnoredDuringExecution": [{ + "weight": 5, + "podAffinityTerm": { + "labelSelector": { + "matchExpressions": [{ + "key": "security", + "operator": "In", + "values":["S1"] + }] + }, + "namespaces": [], + "topologyKey": "region" + } + }] + }}`, + } + stayWithS2InRegion := map[string]string{ + api.AffinityAnnotationKey: ` + {"podAffinity": { + "preferredDuringSchedulingIgnoredDuringExecution": [{ + "weight": 6, + "podAffinityTerm": { + "labelSelector": { + "matchExpressions": [{ + "key": "security", + "operator": "In", + "values":["S2"] + }] + }, + "namespaces": [], + "topologyKey": "region" + } + }] + }}`, + } + affinity3 := map[string]string{ + api.AffinityAnnotationKey: ` + {"podAffinity": { + "preferredDuringSchedulingIgnoredDuringExecution": [ + { + "weight": 8, + "podAffinityTerm": { + "labelSelector": { + "matchExpressions": [{ + "key": "security", + "operator": "NotIn", + "values":["S1"] + }, { + "key": "security", + "operator": "In", + "values":["S2"] + }] + }, + "namespaces": [], + "topologyKey": "region" + } + }, { + "weight": 2, + "podAffinityTerm": { + "labelSelector": { + "matchExpressions": [{ + "key": "security", + "operator": "Exists" + }, { + "key": "wrongkey", + "operator": "DoesNotExist" + }] + }, + "namespaces": [], + "topologyKey": "region" + } + } + ] + }}`, + } + hardAffinity := map[string]string{ + api.AffinityAnnotationKey: ` + {"podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [ + { + "labelSelector":{ + "matchExpressions": [{ + "key": "security", + "operator": "In", + "values": ["S1", "value2"] + }] + }, + "namespaces": [], + "topologyKey": "region" + }, { + "labelSelector": { + "matchExpressions": [{ + "key": "security", + "operator": "Exists" + }, { + "key": "wrongkey", + "operator": "DoesNotExist" + }] + }, + "namespaces": [], + "topologyKey": "region" + } + ] + }}`, + } + awayFromS1InAz := map[string]string{ + api.AffinityAnnotationKey: ` + {"podAntiAffinity": { + "preferredDuringSchedulingIgnoredDuringExecution": [{ + "weight": 5, + "podAffinityTerm": { + "labelSelector": { + "matchExpressions": [{ + "key": "security", + "operator": "In", + "values":["S1"] + }] + }, + 
"namespaces": [], + "topologyKey": "az" + } + }] + }}`, + } + // to stay away from security S2 in any az. + awayFromS2InAz := map[string]string{ + api.AffinityAnnotationKey: ` + {"podAntiAffinity": { + "preferredDuringSchedulingIgnoredDuringExecution": [{ + "weight": 5, + "podAffinityTerm": { + "labelSelector": { + "matchExpressions": [{ + "key": "security", + "operator": "In", + "values":["S2"] + }] + }, + "namespaces": [], + "topologyKey": "az" + } + }] + }}`, + } + // to stay with security S1 in same region, stay away from security S2 in any az. + stayWithS1InRegionAwayFromS2InAz := map[string]string{ + api.AffinityAnnotationKey: ` + {"podAffinity": { + "preferredDuringSchedulingIgnoredDuringExecution": [{ + "weight": 8, + "podAffinityTerm": { + "labelSelector": { + "matchExpressions": [{ + "key": "security", + "operator": "In", + "values":["S1"] + }] + }, + "namespaces": [], + "topologyKey": "region" + } + }] + }, + "podAntiAffinity": { + "preferredDuringSchedulingIgnoredDuringExecution": [{ + "weight": 5, + "podAffinityTerm": { + "labelSelector": { + "matchExpressions": [{ + "key": "security", + "operator": "In", + "values":["S2"] + }] + }, + "namespaces": [], + "topologyKey": "az" + } + }] + }}`, + } + + tests := []struct { + pod *api.Pod + pods []*api.Pod + nodes []api.Node + expectedList schedulerapi.HostPriorityList + test string + }{ + { + pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: map[string]string{}}}, + nodes: []api.Node{ + {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, + {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, + {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, + }, + expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}, {"machine3", 0}}, + test: "all machines are same priority as Affinity is nil", + }, + // the node(machine1) that have the label {"region": "China"} (match the topology key) and that have existing pods that match the labelSelector get high score + // the node(machine3) that don't have the label {"region": "whatever the value is"} (mismatch the topology key) but that have existing pods that match the labelSelector get low score + // the node(machine2) that have the label {"region": "China"} (match the topology key) but that have existing pods that mismatch the labelSelector get low score + { + pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegion}}, + pods: []*api.Pod{ + {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}}, + {Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, + }, + nodes: []api.Node{ + {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, + {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, + {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, + }, + expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 0}, {"machine3", 0}}, + test: "Affinity: pod that matches topology key & pods in nodes will get high score comparing to others" + + "which doesn't match either pods in nodes or in topology key", + }, + // the node1(machine1) that have the label {"region": "China"} (match the topology key) and that have existing pods that match the labelSelector 
get high score + // the node2(machine2) that have the label {"region": "China"}, match the topology key and have the same label value with node1, get the same high score with node1 + // the node3(machine3) that have the label {"region": "India"}, match the topology key but have a different label value, don't have existing pods that match the labelSelector, + // get a low score. + { + pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Annotations: stayWithS1InRegion}}, + pods: []*api.Pod{ + {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, + }, + nodes: []api.Node{ + {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, + {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgChinaAzAz1}}, + {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelRgIndia}}, + }, + expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}, {"machine3", 0}}, + test: "All the nodes that have the same topology key & label value with one of them has an existing pod that match the affinity rules, have the same score", + }, + // there are 2 regions, say regionChina(machine1,machine3,machine4) and regionIndia(machine2,machine5), both regions have nodes that match the preference. + // But there are more nodes(actually more existing pods) in regionChina that match the preference than regionIndia. + // Then, nodes in regionChina get higher score than nodes in regionIndia, and all the nodes in regionChina should get a same score(high score), + // while all the nodes in regionIndia should get another same score(low score). + { + pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS2InRegion}}, + pods: []*api.Pod{ + {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}}, + {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}}, + {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}}, + {Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}}, + {Spec: api.PodSpec{NodeName: "machine4"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}}, + {Spec: api.PodSpec{NodeName: "machine5"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}}, + }, + nodes: []api.Node{ + {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, + {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, + {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelRgChina}}, + {ObjectMeta: api.ObjectMeta{Name: "machine4", Labels: labelRgChina}}, + {ObjectMeta: api.ObjectMeta{Name: "machine5", Labels: labelRgIndia}}, + }, + expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 5}, {"machine3", 10}, {"machine4", 10}, {"machine5", 5}}, + test: "Affinity: nodes in one region has more matching pods comparing to other reqion, so the region which has more macthes will get high score", + }, + // Test with the different operators and values for pod affinity scheduling preference, including some match failures. 
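+		// (expected arithmetic: the S1 pod on machine1 satisfies only the weight-2 term, the S2 pod on
+		// machine2 satisfies both the weight-8 and the weight-2 terms, and machine3 has no "region" label
+		// so its topology key never matches; the raw sums 2, 10, 0 normalize to scores 2, 10, 0)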
+ { + pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: affinity3}}, + pods: []*api.Pod{ + {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}}, + {Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, + }, + nodes: []api.Node{ + {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, + {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, + {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, + }, + expectedList: []schedulerapi.HostPriority{{"machine1", 2}, {"machine2", 10}, {"machine3", 0}}, + test: "Affinity: different Label operators and values for pod affinity scheduling preference, including some match failures ", + }, + // Test the symmetry cases for affinity, the difference between affinity and symmetry is not the pod wants to run together with some existing pods, + // but the existing pods have the inter pod affinity preference while the pod to schedule satisfy the preference. + { + pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}}, + pods: []*api.Pod{ + {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegion}}, + {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2, Annotations: stayWithS2InRegion}}, + }, + nodes: []api.Node{ + {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, + {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, + {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, + }, + expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 10}, {"machine3", 0}}, + test: "Affinity symmetry: considred only the preferredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry", + }, + { + pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, + pods: []*api.Pod{ + {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: hardAffinity}}, + {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2, Annotations: hardAffinity}}, + }, + nodes: []api.Node{ + {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, + {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, + {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, + }, + expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}, {"machine3", 0}}, + test: "Affinity symmetry: considred RequiredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry", + }, + + // The pod to schedule prefer to stay away from some existing pods at node level using the pod anti affinity. 
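+		// For anti-affinity terms the matched weight is subtracted from the node's sum, so nodes whose
+		// existing pods match the anti-affinity selector end up with the lower normalized scores.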
+ // the nodes that have the label {"node": "bar"} (match the topology key) and that have existing pods that match the labelSelector get low score + // the nodes that don't have the label {"node": "whatever the value is"} (mismatch the topology key) but that have existing pods that match the labelSelector get high score + // the nodes that have the label {"node": "bar"} (match the topology key) but that have existing pods that mismatch the labelSelector get high score + // there are 2 nodes, say node1 and node2, both nodes have pods that match the labelSelector and have topology-key in node.Labels. + // But there are more pods on node1 that match the preference than node2. Then, node1 get a lower score than node2. + { + pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS1InAz}}, + pods: []*api.Pod{ + {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}}, + }, + nodes: []api.Node{ + {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelAzAz1}}, + {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgChina}}, + }, + expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 10}}, + test: "Anti Affinity: pod that doesnot match existing pods in node will get high score ", + }, + { + pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS1InAz}}, + pods: []*api.Pod{ + {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, + }, + nodes: []api.Node{ + {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelAzAz1}}, + {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgChina}}, + }, + expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 10}}, + test: "Anti Affinity: pod that does not matches topology key & matches the pods in nodes will get higher score comparing to others ", + }, + { + pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS1InAz}}, + pods: []*api.Pod{ + {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}}, + }, + nodes: []api.Node{ + {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelAzAz1}}, + {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, + }, + expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 10}}, + test: "Anti Affinity: one node has more matching pods comparing to other node, so the node which has more unmacthes will get high score", + }, + // Test the symmetry cases for anti affinity + { + pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}}, + pods: []*api.Pod{ + {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS2InAz}}, + {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2, Annotations: awayFromS1InAz}}, + }, + nodes: []api.Node{ + {ObjectMeta: api.ObjectMeta{Name: 
"machine1", Labels: labelAzAz1}}, + {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAz2}}, + }, + expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 10}}, + test: "Anti Affinity symmetry: the existing pods in node which has anti affinity match will get high score", + }, + // Test both affinity and anti-affinity + { + pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegionAwayFromS2InAz}}, + pods: []*api.Pod{ + {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, + }, + nodes: []api.Node{ + {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, + {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAz1}}, + }, + expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 0}}, + test: "Affinity and Anti Affinity: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity", + }, + // Combined cases considering both affinity and anti-affinity, the pod to schedule and existing pods have the same labels (they are in the same RC/service), + // the pod prefer to run together with its brother pods in the same region, but wants to stay away from them at node level, + // so that all the pods of a RC/service can stay in a same region but trying to separate with each other + // machine-1,machine-3,machine-4 are in ChinaRegion others machin-2,machine-5 are in IndiaRegion + { + pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegionAwayFromS2InAz}}, + pods: []*api.Pod{ + {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: api.PodSpec{NodeName: "machine4"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: api.PodSpec{NodeName: "machine5"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, + }, + nodes: []api.Node{ + {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChinaAzAz1}}, + {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, + {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelRgChina}}, + {ObjectMeta: api.ObjectMeta{Name: "machine4", Labels: labelRgChina}}, + {ObjectMeta: api.ObjectMeta{Name: "machine5", Labels: labelRgIndia}}, + }, + expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 4}, {"machine3", 10}, {"machine4", 10}, {"machine5", 4}}, + test: "Affinity and Anti Affinity: considering both affinity and anti-affinity, the pod to schedule and existing pods have the same labels", + }, + // Consider Affinity, Anti Affinity and symmetry together. 
+ // for Affinity, the weights are: 8, 0, 0, 0 + // for Anti Affinity, the weights are: 0, -5, 0, 0 + // for Affinity symmetry, the weights are: 0, 0, 8, 0 + // for Anti Affinity symmetry, the weights are: 0, 0, 0, -5 + { + pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegionAwayFromS2InAz}}, + pods: []*api.Pod{ + {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}}, + {Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Annotations: stayWithS1InRegionAwayFromS2InAz}}, + {Spec: api.PodSpec{NodeName: "machine4"}, ObjectMeta: api.ObjectMeta{Annotations: awayFromS1InAz}}, + }, + nodes: []api.Node{ + {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, + {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAz1}}, + {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelRgIndia}}, + {ObjectMeta: api.ObjectMeta{Name: "machine4", Labels: labelAzAz2}}, + }, + expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 0}, {"machine3", 10}, {"machine4", 0}}, + test: "Affinity and Anti Affinity and symmetry: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity & symmetry", + }, + } + for _, test := range tests { + nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods) + interPodAffinity := InterPodAffinity{ + info: FakeNodeListInfo(test.nodes), + nodeLister: algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}), + podLister: algorithm.FakePodLister(test.pods), + hardPodAffinityWeight: api.DefaultHardPodAffinitySymmetricWeight, + failureDomains: priorityutil.Topologies{DefaultKeys: strings.Split(api.DefaultFailureDomains, ",")}, + } + list, err := interPodAffinity.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(api.NodeList{Items: test.nodes})) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if !reflect.DeepEqual(test.expectedList, list) { + t.Errorf("%s: \nexpected \n\t%#v, \ngot \n\t%#v\n", test.test, test.expectedList, list) + } + } +} + +func TestHardPodAffinitySymmetricWeight(t *testing.T) { + podLabelServiceS1 := map[string]string{ + "service": "S1", + } + labelRgChina := map[string]string{ + "region": "China", + } + labelRgIndia := map[string]string{ + "region": "India", + } + labelAzAz1 := map[string]string{ + "az": "az1", + } + hardPodAffinity := map[string]string{ + api.AffinityAnnotationKey: ` + {"podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [ + { + "labelSelector":{ + "matchExpressions": [{ + "key": "service", + "operator": "In", + "values": ["S1"] + }] + }, + "namespaces": [], + "topologyKey": "region" + } + ] + }}`, + } + tests := []struct { + pod *api.Pod + pods []*api.Pod + nodes []api.Node + hardPodAffinityWeight int + expectedList schedulerapi.HostPriorityList + test string + }{ + { + pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelServiceS1}}, + pods: []*api.Pod{ + {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Annotations: hardPodAffinity}}, + {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Annotations: hardPodAffinity}}, + }, + nodes: []api.Node{ + {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, + {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: 
labelRgIndia}}, + {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, + }, + hardPodAffinityWeight: api.DefaultHardPodAffinitySymmetricWeight, + expectedList: []schedulerapi.HostPriority{{"machine1", 10}, {"machine2", 10}, {"machine3", 0}}, + test: "Hard Pod Affinity symmetry: hard pod affinity symmetry weights 1 by default, then nodes that match the hard pod affinity symmetry rules, get a high score", + }, + { + pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelServiceS1}}, + pods: []*api.Pod{ + {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Annotations: hardPodAffinity}}, + {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Annotations: hardPodAffinity}}, + }, + nodes: []api.Node{ + {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, + {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, + {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, + }, + hardPodAffinityWeight: 0, + expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}, {"machine3", 0}}, + test: "Hard Pod Affinity symmetry: hard pod affinity symmetry is closed(weights 0), then nodes that match the hard pod affinity symmetry rules, get same score with those not match", + }, + } + for _, test := range tests { + nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods) + ipa := InterPodAffinity{ + info: FakeNodeListInfo(test.nodes), + nodeLister: algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}), + podLister: algorithm.FakePodLister(test.pods), + hardPodAffinityWeight: test.hardPodAffinityWeight, + } + list, err := ipa.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(api.NodeList{Items: test.nodes})) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if !reflect.DeepEqual(test.expectedList, list) { + t.Errorf("%s: \nexpected \n\t%#v, \ngot \n\t%#v\n", test.test, test.expectedList, list) + } + } +} + +func TestSoftPodAntiAffinityWithFailureDomains(t *testing.T) { + labelAzAZ1 := map[string]string{ + "az": "az1", + } + LabelZoneFailureDomainAZ1 := map[string]string{ + unversioned.LabelZoneFailureDomain: "az1", + } + podLabel1 := map[string]string{ + "security": "S1", + } + antiAffinity1 := map[string]string{ + api.AffinityAnnotationKey: ` + {"podAntiAffinity": { + "preferredDuringSchedulingIgnoredDuringExecution": [{ + "weight": 5, + "podAffinityTerm": { + "labelSelector": { + "matchExpressions": [{ + "key": "security", + "operator": "In", + "values":["S1"] + }] + }, + "namespaces": [], + "topologyKey": "" + } + }] + }}`, + } + tests := []struct { + pod *api.Pod + pods []*api.Pod + nodes []api.Node + failureDomains priorityutil.Topologies + expectedList schedulerapi.HostPriorityList + test string + }{ + { + pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabel1, Annotations: antiAffinity1}}, + pods: []*api.Pod{ + {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel1}}, + {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabel1}}, + }, + nodes: []api.Node{ + {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: LabelZoneFailureDomainAZ1}}, + {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAZ1}}, + }, + failureDomains: priorityutil.Topologies{DefaultKeys: strings.Split(api.DefaultFailureDomains, ",")}, + expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 10}}, + test: "Soft 
Pod Anti Affinity: when the topologyKey is emtpy, match among topologyKeys indicated by failure domains.", + }, + { + pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabel1, Annotations: antiAffinity1}}, + pods: []*api.Pod{ + {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel1}}, + {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabel1}}, + }, + nodes: []api.Node{ + {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: LabelZoneFailureDomainAZ1}}, + {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAZ1}}, + }, + failureDomains: priorityutil.Topologies{}, + expectedList: []schedulerapi.HostPriority{{"machine1", 0}, {"machine2", 0}}, + test: "Soft Pod Anti Affinity: when the topologyKey is emtpy, and no failure domains indicated, regard as topologyKey not match.", + }, + } + for _, test := range tests { + nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods) + ipa := InterPodAffinity{ + info: FakeNodeListInfo(test.nodes), + nodeLister: algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}), + podLister: algorithm.FakePodLister(test.pods), + hardPodAffinityWeight: api.DefaultHardPodAffinitySymmetricWeight, + failureDomains: test.failureDomains, + } + list, err := ipa.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(api.NodeList{Items: test.nodes})) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if !reflect.DeepEqual(test.expectedList, list) { + t.Errorf("%s: \nexpected \n\t%#v, \ngot \n\t%#v\n", test.test, test.expectedList, list) + } + } +} diff --git a/plugin/pkg/scheduler/algorithm/priorities/util/non_zero.go b/plugin/pkg/scheduler/algorithm/priorities/util/non_zero.go index 4a386eb5a0298..0e9bee1b5f40b 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/util/non_zero.go +++ b/plugin/pkg/scheduler/algorithm/priorities/util/non_zero.go @@ -16,7 +16,12 @@ limitations under the License. package util -import "k8s.io/kubernetes/pkg/api" +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/util/sets" +) // For each of these resources, a pod that doesn't request the resource explicitly // will be treated as having requested the amount indicated below, for the purpose @@ -48,3 +53,83 @@ func GetNonzeroRequests(requests *api.ResourceList) (int64, int64) { } return outMilliCPU, outMemory } + +// FilterPodsByNameSpaces filters the pods based the given list of namespaces, +// empty set of namespaces means all namespaces. +func FilterPodsByNameSpaces(names sets.String, pods []*api.Pod) []*api.Pod { + if len(pods) == 0 || len(names) == 0 { + return pods + } + result := []*api.Pod{} + for _, pod := range pods { + if names.Has(pod.Namespace) { + result = append(result, pod) + } + } + return result +} + +// GetNamespacesFromPodAffinityTerm returns a set of names +// according to the namespaces indicated in podAffinityTerm. +// if the NameSpaces is nil considers the given pod's namespace +// if the Namespaces is empty list then considers all the namespaces +func GetNamespacesFromPodAffinityTerm(pod *api.Pod, podAffinityTerm api.PodAffinityTerm) sets.String { + names := sets.String{} + if podAffinityTerm.Namespaces == nil { + names.Insert(pod.Namespace) + } else if len(podAffinityTerm.Namespaces) != 0 { + names.Insert(podAffinityTerm.Namespaces...) 
+ } + return names +} + +// NodesHaveSameTopologyKeyInternal checks if nodeA and nodeB have same label value with given topologyKey as label key. +func NodesHaveSameTopologyKeyInternal(nodeA, nodeB *api.Node, topologyKey string) bool { + return nodeA.Labels != nil && nodeB.Labels != nil && len(nodeA.Labels[topologyKey]) > 0 && nodeA.Labels[topologyKey] == nodeB.Labels[topologyKey] +} + +type Topologies struct { + DefaultKeys []string +} + +// NodesHaveSameTopologyKey checks if nodeA and nodeB have same label value with given topologyKey as label key. +// If the topologyKey is nil/empty, check if the two nodes have any of the default topologyKeys, and have same corresponding label value. +func (tps *Topologies) NodesHaveSameTopologyKey(nodeA *api.Node, nodeB *api.Node, topologyKey string) bool { + if len(topologyKey) == 0 { + // assumes this is allowed only for PreferredDuringScheduling pod anti-affinity (ensured by api/validation) + for _, defaultKey := range tps.DefaultKeys { + if NodesHaveSameTopologyKeyInternal(nodeA, nodeB, defaultKey) { + return true + } + } + return false + } else { + return NodesHaveSameTopologyKeyInternal(nodeA, nodeB, topologyKey) + } +} + +type getNodeFunc func(*api.Pod) (*api.Node, error) + +// CheckIfPodMatchPodAffinityTerm checks if podB's affinity request is compatible with podA +func (tps *Topologies) CheckIfPodMatchPodAffinityTerm(podA *api.Pod, podB *api.Pod, podBAffinityTerm api.PodAffinityTerm, getNodeA, getNodeB getNodeFunc) (bool, error) { + names := GetNamespacesFromPodAffinityTerm(podB, podBAffinityTerm) + if len(names) != 0 && !names.Has(podA.Namespace) { + return false, nil + } + + labelSelector, err := unversioned.LabelSelectorAsSelector(podBAffinityTerm.LabelSelector) + if err != nil || !labelSelector.Matches(labels.Set(podA.Labels)) { + return false, err + } + + podANode, err := getNodeA(podA) + if err != nil { + return false, err + } + podBNode, err := getNodeB(podB) + if err != nil { + return false, err + } + + return tps.NodesHaveSameTopologyKey(podANode, podBNode, podBAffinityTerm.TopologyKey), nil +} diff --git a/plugin/pkg/scheduler/algorithmprovider/defaults/compatibility_test.go b/plugin/pkg/scheduler/algorithmprovider/defaults/compatibility_test.go index 5dc560082f978..dbd61054aa998 100644 --- a/plugin/pkg/scheduler/algorithmprovider/defaults/compatibility_test.go +++ b/plugin/pkg/scheduler/algorithmprovider/defaults/compatibility_test.go @@ -22,6 +22,7 @@ import ( "net/http/httptest" + "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/client/restclient" client "k8s.io/kubernetes/pkg/client/unversioned" @@ -116,7 +117,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) { defer server.Close() client := client.NewOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) - if _, err := factory.NewConfigFactory(client, "some-scheduler-name").CreateFromConfig(policy); err != nil { + if _, err := factory.NewConfigFactory(client, "some-scheduler-name", api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains).CreateFromConfig(policy); err != nil { t.Errorf("%s: Error constructing: %v", v, err) continue } diff --git a/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go b/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go index 08a81a371d261..9e9ddb3be8fc6 100644 --- a/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go +++ b/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go @@ -32,8 
+32,10 @@ import ( "github.com/golang/glog" ) -// GCE instances can have up to 16 PD volumes attached. -const DefaultMaxGCEPDVolumes = 16 +const ( + // GCE instances can have up to 16 PD volumes attached. + DefaultMaxGCEPDVolumes = 16 +) // getMaxVols checks the max PD volumes environment variable, otherwise returning a default value func getMaxVols(defaultVal int) int { @@ -71,7 +73,7 @@ func init() { }, ) // PodFitsPorts has been replaced by PodFitsHostPorts for better user understanding. - // For backwards compatibility with 1.0, PodFitsPorts is regitered as well. + // For backwards compatibility with 1.0, PodFitsPorts is registered as well. factory.RegisterFitPredicate("PodFitsPorts", predicates.PodFitsHostPorts) // ImageLocalityPriority prioritizes nodes based on locality of images requested by a pod. Nodes with larger size // of already-installed packages required by the pod will be preferred over nodes with no already-installed @@ -125,6 +127,13 @@ func defaultPredicates() sets.String { // GeneralPredicates are the predicates that are enforced by all Kubernetes components // (e.g. kubelet and all schedulers) factory.RegisterFitPredicate("GeneralPredicates", predicates.GeneralPredicates), + // Fit is determined by inter-pod affinity. + factory.RegisterFitPredicateFactory( + "MatchInterPodAffinity", + func(args factory.PluginFactoryArgs) algorithm.FitPredicate { + return predicates.NewPodAffinityPredicate(args.NodeInfo, args.PodLister, args.FailureDomains) + }, + ), ) } @@ -153,5 +162,16 @@ func defaultPriorities() sets.String { Weight: 1, }, ), + //pods should be placed in the same topological domain (e.g. same node, same rack, same zone, same power domain, etc.) + //as some other pods, or, conversely, should not be placed in the same topological domain as some other pods. + factory.RegisterPriorityConfigFactory( + "InterPodAffinityPriority", + factory.PriorityConfigFactory{ + Function: func(args factory.PluginFactoryArgs) algorithm.PriorityFunction { + return priorities.NewInterPodAffinityPriority(args.NodeInfo, args.NodeLister, args.PodLister, args.HardPodAffinitySymmetricWeight, args.FailureDomains) + }, + Weight: 1, + }, + ), ) } diff --git a/plugin/pkg/scheduler/factory/factory.go b/plugin/pkg/scheduler/factory/factory.go index e54f12a501d2e..c25f6ebd7e6ac 100644 --- a/plugin/pkg/scheduler/factory/factory.go +++ b/plugin/pkg/scheduler/factory/factory.go @@ -21,6 +21,7 @@ package factory import ( "fmt" "math/rand" + "strings" "sync" "sync/atomic" "time" @@ -34,6 +35,7 @@ import ( "k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/util/runtime" "k8s.io/kubernetes/pkg/util/sets" + utilvalidation "k8s.io/kubernetes/pkg/util/validation" "k8s.io/kubernetes/plugin/pkg/scheduler" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates" @@ -83,10 +85,18 @@ type ConfigFactory struct { // processed by this scheduler, based on pods's annotation key: // 'scheduler.alpha.kubernetes.io/name' SchedulerName string + + // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule + // corresponding to every RequiredDuringScheduling affinity rule. + // HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 0-100. + HardPodAffinitySymmetricWeight int + + // Indicate the "all topologies" set for empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity. 
+ FailureDomains string } // Initializes the factory. -func NewConfigFactory(client *client.Client, schedulerName string) *ConfigFactory { +func NewConfigFactory(client *client.Client, schedulerName string, hardPodAffinitySymmetricWeight int, failureDomains string) *ConfigFactory { stopEverything := make(chan struct{}) schedulerCache := schedulercache.New(30*time.Second, stopEverything) @@ -95,15 +105,17 @@ func NewConfigFactory(client *client.Client, schedulerName string) *ConfigFactor PodQueue: cache.NewFIFO(cache.MetaNamespaceKeyFunc), ScheduledPodLister: &cache.StoreToPodLister{}, // Only nodes in the "Ready" condition with status == "True" are schedulable - NodeLister: &cache.StoreToNodeLister{}, - PVLister: &cache.StoreToPVFetcher{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)}, - PVCLister: &cache.StoreToPVCFetcher{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)}, - ServiceLister: &cache.StoreToServiceLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)}, - ControllerLister: &cache.StoreToReplicationControllerLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)}, - ReplicaSetLister: &cache.StoreToReplicaSetLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)}, - schedulerCache: schedulerCache, - StopEverything: stopEverything, - SchedulerName: schedulerName, + NodeLister: &cache.StoreToNodeLister{}, + PVLister: &cache.StoreToPVFetcher{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)}, + PVCLister: &cache.StoreToPVCFetcher{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)}, + ServiceLister: &cache.StoreToServiceLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)}, + ControllerLister: &cache.StoreToReplicationControllerLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)}, + ReplicaSetLister: &cache.StoreToReplicaSetLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)}, + schedulerCache: schedulerCache, + StopEverything: stopEverything, + SchedulerName: schedulerName, + HardPodAffinitySymmetricWeight: hardPodAffinitySymmetricWeight, + FailureDomains: failureDomains, } c.PodLister = schedulerCache @@ -287,6 +299,18 @@ func (f *ConfigFactory) CreateFromConfig(policy schedulerapi.Policy) (*scheduler // Creates a scheduler from a set of registered fit predicate keys and priority keys. 
func (f *ConfigFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String, extenders []algorithm.SchedulerExtender) (*scheduler.Config, error) { glog.V(2).Infof("creating scheduler with fit predicates '%v' and priority functions '%v", predicateKeys, priorityKeys) + + if f.HardPodAffinitySymmetricWeight < 0 || f.HardPodAffinitySymmetricWeight > 100 { + return nil, fmt.Errorf("invalid hardPodAffinitySymmetricWeight: %d, must be in the range 0-100", f.HardPodAffinitySymmetricWeight) + } + + failureDomainArgs := strings.Split(f.FailureDomains, ",") + for _, failureDomain := range failureDomainArgs { + if !utilvalidation.IsQualifiedName(failureDomain) { + return nil, fmt.Errorf("invalid failure domain: %s", failureDomain) + } + } + pluginArgs := PluginFactoryArgs{ PodLister: f.PodLister, ServiceLister: f.ServiceLister, @@ -297,6 +321,8 @@ func (f *ConfigFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String, NodeInfo: &predicates.CachedNodeInfo{StoreToNodeLister: f.NodeLister}, PVInfo: f.PVLister, PVCInfo: f.PVCLister, + HardPodAffinitySymmetricWeight: f.HardPodAffinitySymmetricWeight, + FailureDomains: sets.NewString(failureDomainArgs...).List(), } predicateFuncs, err := getFitPredicateFunctions(predicateKeys, pluginArgs) if err != nil { diff --git a/plugin/pkg/scheduler/factory/factory_test.go b/plugin/pkg/scheduler/factory/factory_test.go index 611bbe1c76a3e..d2ad5b716428a 100644 --- a/plugin/pkg/scheduler/factory/factory_test.go +++ b/plugin/pkg/scheduler/factory/factory_test.go @@ -47,7 +47,7 @@ func TestCreate(t *testing.T) { server := httptest.NewServer(&handler) defer server.Close() client := client.NewOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) - factory := NewConfigFactory(client, api.DefaultSchedulerName) + factory := NewConfigFactory(client, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains) factory.Create() } @@ -65,7 +65,7 @@ func TestCreateFromConfig(t *testing.T) { server := httptest.NewServer(&handler) defer server.Close() client := client.NewOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) - factory := NewConfigFactory(client, api.DefaultSchedulerName) + factory := NewConfigFactory(client, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains) // Pre-register some predicate and priority functions RegisterFitPredicate("PredicateOne", PredicateOne) @@ -106,7 +106,7 @@ func TestCreateFromEmptyConfig(t *testing.T) { server := httptest.NewServer(&handler) defer server.Close() client := client.NewOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) - factory := NewConfigFactory(client, api.DefaultSchedulerName) + factory := NewConfigFactory(client, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains) configData = []byte(`{}`) if err := runtime.DecodeInto(latestschedulerapi.Codec, configData, &policy); err != nil { @@ -148,7 +148,7 @@ func TestDefaultErrorFunc(t *testing.T) { mux.Handle(testapi.Default.ResourcePath("pods", "bar", "foo"), &handler) server := httptest.NewServer(mux) defer server.Close() - factory := NewConfigFactory(client.NewOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}), api.DefaultSchedulerName) + 
factory := NewConfigFactory(client.NewOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}), api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains) queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc) podBackoff := podBackoff{ perPodBackoff: map[types.NamespacedName]*backoffEntry{}, @@ -318,9 +318,9 @@ func TestResponsibleForPod(t *testing.T) { defer server.Close() client := client.NewOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) // factory of "default-scheduler" - factoryDefaultScheduler := NewConfigFactory(client, api.DefaultSchedulerName) + factoryDefaultScheduler := NewConfigFactory(client, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains) // factory of "foo-scheduler" - factoryFooScheduler := NewConfigFactory(client, "foo-scheduler") + factoryFooScheduler := NewConfigFactory(client, "foo-scheduler", api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains) // scheduler annotaions to be tested schedulerAnnotationFitsDefault := map[string]string{"scheduler.alpha.kubernetes.io/name": "default-scheduler"} schedulerAnnotationFitsFoo := map[string]string{"scheduler.alpha.kubernetes.io/name": "foo-scheduler"} @@ -370,3 +370,63 @@ func TestResponsibleForPod(t *testing.T) { } } } + +func TestInvalidHardPodAffinitySymmetricWeight(t *testing.T) { + handler := utiltesting.FakeHandler{ + StatusCode: 500, + ResponseBody: "", + T: t, + } + server := httptest.NewServer(&handler) + // TODO: Uncomment when fix #19254 + // defer server.Close() + client := client.NewOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + // factory of "default-scheduler" + factory := NewConfigFactory(client, api.DefaultSchedulerName, -1, api.DefaultFailureDomains) + _, err := factory.Create() + if err == nil { + t.Errorf("expected err: invalid hardPodAffinitySymmetricWeight, got nothing") + } +} + +func TestInvalidFactoryArgs(t *testing.T) { + handler := utiltesting.FakeHandler{ + StatusCode: 500, + ResponseBody: "", + T: t, + } + server := httptest.NewServer(&handler) + defer server.Close() + client := client.NewOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) + + testCases := []struct { + hardPodAffinitySymmetricWeight int + failureDomains string + expectErr string + }{ + { + hardPodAffinitySymmetricWeight: -1, + failureDomains: api.DefaultFailureDomains, + expectErr: "invalid hardPodAffinitySymmetricWeight: -1, must be in the range 0-100", + }, + { + hardPodAffinitySymmetricWeight: 101, + failureDomains: api.DefaultFailureDomains, + expectErr: "invalid hardPodAffinitySymmetricWeight: 101, must be in the range 0-100", + }, + { + hardPodAffinitySymmetricWeight: 0, + failureDomains: "INVALID_FAILURE_DOMAINS", + expectErr: "invalid failure domain: INVALID_FAILURE_DOMAINS", + }, + } + + for _, test := range testCases { + factory := NewConfigFactory(client, api.DefaultSchedulerName, test.hardPodAffinitySymmetricWeight, test.failureDomains) + _, err := factory.Create() + if err == nil { + t.Errorf("expected err: %s, got nothing", test.expectErr) + } + } + +} diff --git a/plugin/pkg/scheduler/factory/plugins.go b/plugin/pkg/scheduler/factory/plugins.go index a4a5857c27850..1235075971817 100644 --- 
a/plugin/pkg/scheduler/factory/plugins.go +++ b/plugin/pkg/scheduler/factory/plugins.go @@ -33,14 +33,16 @@ import ( // PluginFactoryArgs are passed to all plugin factory functions. type PluginFactoryArgs struct { - PodLister algorithm.PodLister - ServiceLister algorithm.ServiceLister - ControllerLister algorithm.ControllerLister - ReplicaSetLister algorithm.ReplicaSetLister - NodeLister algorithm.NodeLister - NodeInfo predicates.NodeInfo - PVInfo predicates.PersistentVolumeInfo - PVCInfo predicates.PersistentVolumeClaimInfo + PodLister algorithm.PodLister + ServiceLister algorithm.ServiceLister + ControllerLister algorithm.ControllerLister + ReplicaSetLister algorithm.ReplicaSetLister + NodeLister algorithm.NodeLister + NodeInfo predicates.NodeInfo + PVInfo predicates.PersistentVolumeInfo + PVCInfo predicates.PersistentVolumeClaimInfo + HardPodAffinitySymmetricWeight int + FailureDomains []string } // A FitPredicateFactory produces a FitPredicate from the given args. diff --git a/test/component/scheduler/perf/util.go b/test/component/scheduler/perf/util.go index a1ffda127f6f4..d3ad7f22f2a97 100644 --- a/test/component/scheduler/perf/util.go +++ b/test/component/scheduler/perf/util.go @@ -62,7 +62,7 @@ func mustSetupScheduler() (schedulerConfigFactory *factory.ConfigFactory, destro Burst: 5000, }) - schedulerConfigFactory = factory.NewConfigFactory(c, api.DefaultSchedulerName) + schedulerConfigFactory = factory.NewConfigFactory(c, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains) schedulerConfig, err := schedulerConfigFactory.Create() if err != nil { panic("Couldn't create scheduler config") diff --git a/docs/user-guide/node-selection/pod-with-node-affinity.yaml b/test/e2e/node-selection/pod-with-node-affinity.yaml similarity index 100% rename from docs/user-guide/node-selection/pod-with-node-affinity.yaml rename to test/e2e/node-selection/pod-with-node-affinity.yaml diff --git a/test/e2e/node-selection/pod-with-pod-affinity.yaml b/test/e2e/node-selection/pod-with-pod-affinity.yaml new file mode 100644 index 0000000000000..718602b9229ef --- /dev/null +++ b/test/e2e/node-selection/pod-with-pod-affinity.yaml @@ -0,0 +1,37 @@ +apiVersion: v1 +kind: Pod +metadata: + name: with-newlabels + annotations: + scheduler.alpha.kubernetes.io/affinity: > + { + "podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "security", + "operator": "In", + "values":["S1"] + }] + }, + "topologyKey": "kubernetes.io/hostname" + }] + }, + "podAntiAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "security", + "operator": "In", + "values":["S2"] + }] + }, + "topologyKey": "kubernetes.io/hostname" + }] + } + } + another-annotation-key: another-annotation-value +spec: + containers: + - name: with-newlabels + image: gcr.io/google_containers/pause:2.0 diff --git a/test/e2e/scheduler_predicates.go b/test/e2e/scheduler_predicates.go index 95c2d21ccc2f5..56d3ed6fa6281 100644 --- a/test/e2e/scheduler_predicates.go +++ b/test/e2e/scheduler_predicates.go @@ -393,7 +393,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { cleanupPods(c, ns) }) - It("validates that a pod with an invalid NodeAffinity is rejected [Feature:NodeAffinity]", func() { + It("validates that a pod with an invalid NodeAffinity is rejected", func() { By("Trying to launch a pod with an invalid Affinity data.") podName := "without-label" @@ -517,7 
+517,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { // Test Nodes does not have any label, hence it should be impossible to schedule Pod with // non-nil NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution. - It("validates that NodeAffinity is respected if not matching [Feature:NodeAffinity]", func() { + It("validates that NodeAffinity is respected if not matching", func() { By("Trying to schedule Pod with nonempty NodeSelector.") podName := "restricted-pod" @@ -573,7 +573,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { // Keep the same steps with the test on NodeSelector, // but specify Affinity in Pod.Annotations, instead of NodeSelector. - It("validates that required NodeAffinity setting is respected if matching [Feature:NodeAffinity]", func() { + It("validates that required NodeAffinity setting is respected if matching", func() { // launch a pod to find a node which can launch a pod. We intentionally do // not just take the node list and choose the first of them. Depending on the // cluster and the scheduler it might be that a "normal" pod cannot be @@ -667,7 +667,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { }) // Verify that an escaped JSON string of NodeAffinity in a YAML PodSpec works. - It("validates that embedding the JSON NodeAffinity setting as a string in the annotation value work [Feature:NodeAffinity]", func() { + It("validates that embedding the JSON NodeAffinity setting as a string in the annotation value work", func() { // launch a pod to find a node which can launch a pod. We intentionally do // not just take the node list and choose the first of them. Depending on the // cluster and the scheduler it might be that a "normal" pod cannot be @@ -712,7 +712,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { By("Trying to launch a pod that with NodeAffinity setting as embedded JSON string in the annotation value.") labelPodName := "with-labels" - nodeSelectionRoot := filepath.Join(framework.TestContext.RepoRoot, "docs/user-guide/node-selection") + nodeSelectionRoot := filepath.Join(framework.TestContext.RepoRoot, "test/e2e/node-selection") testPodPath := filepath.Join(nodeSelectionRoot, "pod-with-node-affinity.yaml") framework.RunKubectlOrDie("create", "-f", testPodPath, fmt.Sprintf("--namespace=%v", ns)) defer c.Pods(ns).Delete(labelPodName, api.NewDeleteOptions(0)) @@ -727,4 +727,551 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { framework.ExpectNoError(err) Expect(labelPod.Spec.NodeName).To(Equal(nodeName)) }) + + // labelSelector Operator is DoesNotExist but values are there in requiredDuringSchedulingIgnoredDuringExecution + // part of podAffinity,so validation fails. 
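Reviewer note: the rejection in the test that follows hinges on a label selector rule: Exists and DoesNotExist take no values, while In and NotIn need at least one. A minimal sketch of that rule with trimmed stand-in types; checkRequirement and its messages are illustrative, not the validation code this patch adds.

package main

import "fmt"

// labelSelectorRequirement is a trimmed stand-in for the API type.
type labelSelectorRequirement struct {
	Key      string
	Operator string // "In", "NotIn", "Exists", "DoesNotExist"
	Values   []string
}

func checkRequirement(r labelSelectorRequirement) error {
	switch r.Operator {
	case "In", "NotIn":
		// Set-based operators need at least one value to compare against.
		if len(r.Values) == 0 {
			return fmt.Errorf("values must be non-empty for operator %q", r.Operator)
		}
	case "Exists", "DoesNotExist":
		// Existence operators only look at the key; supplying values is invalid.
		if len(r.Values) > 0 {
			return fmt.Errorf("values must be empty for operator %q", r.Operator)
		}
	default:
		return fmt.Errorf("unknown operator %q", r.Operator)
	}
	return nil
}

func main() {
	// The annotation in the next test pairs DoesNotExist with a value,
	// so the API server rejects the pod as Invalid.
	bad := labelSelectorRequirement{Key: "service", Operator: "DoesNotExist", Values: []string{"securityscan"}}
	fmt.Println(checkRequirement(bad))
}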
+ It("validates that a pod with an invalid podAffinity is rejected because of the LabelSelectorRequirement is invalid", func() { + By("Trying to launch a pod with an invalid pod Affinity data.") + podName := "without-label-" + string(util.NewUUID()) + _, err := c.Pods(ns).Create(&api.Pod{ + TypeMeta: unversioned.TypeMeta{ + Kind: "Pod", + }, + ObjectMeta: api.ObjectMeta{ + Name: podName, + Labels: map[string]string{"name": "without-label"}, + Annotations: map[string]string{ + "scheduler.alpha.kubernetes.io/affinity": ` + {"podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "weight": 0, + "podAffinityTerm": { + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "DoesNotExist", + "values":["securityscan"] + }] + }, + "namespaces": [], + "topologyKey": "kubernetes.io/hostname" + } + }] + }}`, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: podName, + Image: "gcr.io/google_containers/pause:2.0", + }, + }, + }, + }) + + if err == nil || !errors.IsInvalid(err) { + framework.Failf("Expect error of invalid, got : %v", err) + } + + // Wait a bit to allow scheduler to do its thing if the pod is not rejected. + // TODO: this is brittle; there's no guarantee the scheduler will have run in 10 seconds. + framework.Logf("Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.") + time.Sleep(10 * time.Second) + + cleanupPods(c, ns) + }) + + // Test Nodes does not have any pod, hence it should be impossible to schedule a Pod with pod affinity. + It("validates that Inter-pod-Affinity is respected if not matching", func() { + By("Trying to schedule Pod with nonempty Pod Affinity.") + podName := "without-label-" + string(util.NewUUID()) + + waitForStableCluster(c) + + _, err := c.Pods(ns).Create(&api.Pod{ + TypeMeta: unversioned.TypeMeta{ + Kind: "Pod", + }, + ObjectMeta: api.ObjectMeta{ + Name: podName, + Annotations: map[string]string{ + "scheduler.alpha.kubernetes.io/affinity": ` + {"podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector":{ + "matchExpressions": [{ + "key": "service", + "operator": "In", + "values": ["securityscan", "value2"] + }] + }, + "topologyKey": "kubernetes.io/hostname" + }] + }}`, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: podName, + Image: "gcr.io/google_containers/pause:2.0", + }, + }, + }, + }) + framework.ExpectNoError(err) + // Wait a bit to allow scheduler to do its thing + // TODO: this is brittle; there's no guarantee the scheduler will have run in 10 seconds. + framework.Logf("Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.") + time.Sleep(10 * time.Second) + + verifyResult(c, podName, ns) + cleanupPods(c, ns) + }) + + // test the pod affinity successful matching scenario. + It("validates that InterPodAffinity is respected if matching", func() { + // launch a pod to find a node which can launch a pod. We intentionally do + // not just take the node list and choose the first of them. Depending on the + // cluster and the scheduler it might be that a "normal" pod cannot be + // scheduled onto it. 
+ By("Trying to launch a pod with a label to get a node which can launch it.") + podName := "with-label-" + string(util.NewUUID()) + _, err := c.Pods(ns).Create(&api.Pod{ + TypeMeta: unversioned.TypeMeta{ + Kind: "Pod", + }, + ObjectMeta: api.ObjectMeta{ + Name: podName, + Labels: map[string]string{"security": "S1"}, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: podName, + Image: "gcr.io/google_containers/pause:2.0", + }, + }, + }, + }) + framework.ExpectNoError(err) + framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, podName, ns)) + pod, err := c.Pods(ns).Get(podName) + framework.ExpectNoError(err) + + nodeName := pod.Spec.NodeName + defer c.Pods(ns).Delete(podName, api.NewDeleteOptions(0)) + + By("Trying to apply a random label on the found node.") + k := "e2e.inter-pod-affinity.kubernetes.io/zone" + v := "china-e2etest" + patch := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`, k, v) + err = c.Patch(api.MergePatchType).Resource("nodes").Name(nodeName).Body([]byte(patch)).Do().Error() + framework.ExpectNoError(err) + + node, err := c.Nodes().Get(nodeName) + framework.ExpectNoError(err) + Expect(node.Labels[k]).To(Equal(v)) + + By("Trying to launch the pod, now with podAffinity.") + labelPodName := "with-podaffinity-" + string(util.NewUUID()) + _, err = c.Pods(ns).Create(&api.Pod{ + TypeMeta: unversioned.TypeMeta{ + Kind: "Pod", + }, + ObjectMeta: api.ObjectMeta{ + Name: labelPodName, + Annotations: map[string]string{ + "scheduler.alpha.kubernetes.io/affinity": ` + {"podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector":{ + "matchExpressions": [{ + "key": "security", + "operator": "In", + "values": ["S1", "value2"] + }] + }, + "topologyKey": "` + k + `", + "namespaces":["` + ns + `"] + }] + }}`, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: labelPodName, + Image: "gcr.io/google_containers/pause:2.0", + }, + }, + }, + }) + framework.ExpectNoError(err) + defer c.Pods(ns).Delete(labelPodName, api.NewDeleteOptions(0)) + + // check that pod got scheduled. We intentionally DO NOT check that the + // pod is running because this will create a race condition with the + // kubelet and the scheduler: the scheduler might have scheduled a pod + // already when the kubelet does not know about its new label yet. The + // kubelet will then refuse to launch the pod. + framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName)) + labelPod, err := c.Pods(ns).Get(labelPodName) + framework.ExpectNoError(err) + Expect(labelPod.Spec.NodeName).To(Equal(nodeName)) + }) + + // test when the pod anti affinity rule is not satisfied, the pod would stay pending. + It("validates that InterPodAntiAffinity is respected if matching", func() { + // launch a pod to find a node which can launch a pod. We intentionally do + // not just take the node list and choose the first of them. Depending on the + // cluster and the scheduler it might be that a "normal" pod cannot be + // scheduled onto it. 
+ By("Trying to launch a pod with a label to get a node which can launch it.") + podName := "with-label-" + string(util.NewUUID()) + _, err := c.Pods(ns).Create(&api.Pod{ + TypeMeta: unversioned.TypeMeta{ + Kind: "Pod", + }, + ObjectMeta: api.ObjectMeta{ + Name: podName, + Labels: map[string]string{"service": "S1"}, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: podName, + Image: "gcr.io/google_containers/pause:2.0", + }, + }, + }, + }) + framework.ExpectNoError(err) + framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, podName, ns)) + pod, err := c.Pods(ns).Get(podName) + framework.ExpectNoError(err) + + nodeName := pod.Spec.NodeName + + By("Trying to apply a random label on the found node.") + k := "e2e.inter-pod-affinity.kubernetes.io/zone" + v := "china-e2etest" + patch := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`, k, v) + err = c.Patch(api.MergePatchType).Resource("nodes").Name(nodeName).Body([]byte(patch)).Do().Error() + framework.ExpectNoError(err) + + node, err := c.Nodes().Get(nodeName) + framework.ExpectNoError(err) + Expect(node.Labels[k]).To(Equal(v)) + + By("Trying to launch the pod, now with podAffinity with same Labels.") + labelPodName := "with-podaffinity-" + string(util.NewUUID()) + _, err = c.Pods(ns).Create(&api.Pod{ + TypeMeta: unversioned.TypeMeta{ + Kind: "Pod", + }, + ObjectMeta: api.ObjectMeta{ + Name: labelPodName, + Labels: map[string]string{"service": "Diff"}, + Annotations: map[string]string{ + "scheduler.alpha.kubernetes.io/affinity": ` + {"podAntiAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector":{ + "matchExpressions": [{ + "key": "service", + "operator": "In", + "values": ["S1", "value2"] + }] + }, + "topologyKey": "` + k + `", + "namespaces": ["` + ns + `"] + }] + }}`, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: labelPodName, + Image: "gcr.io/google_containers/pause:2.0", + }, + }, + }, + }) + framework.ExpectNoError(err) + // Wait a bit to allow scheduler to do its thing + // TODO: this is brittle; there's no guarantee the scheduler will have run in 10 seconds. + framework.Logf("Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.") + time.Sleep(10 * time.Second) + + verifyResult(c, labelPodName, ns) + cleanupPods(c, ns) + }) + + // test the pod affinity successful matching scenario with multiple Label Operators. + It("validates that InterPodAffinity is respected if matching with multiple Affinities", func() { + // launch a pod to find a node which can launch a pod. We intentionally do + // not just take the node list and choose the first of them. Depending on the + // cluster and the scheduler it might be that a "normal" pod cannot be + // scheduled onto it. 
+ By("Trying to launch a pod with a label to get a node which can launch it.") + podName := "with-label-" + string(util.NewUUID()) + _, err := c.Pods(ns).Create(&api.Pod{ + TypeMeta: unversioned.TypeMeta{ + Kind: "Pod", + }, + ObjectMeta: api.ObjectMeta{ + Name: podName, + Labels: map[string]string{"security": "S1"}, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: podName, + Image: "gcr.io/google_containers/pause:2.0", + }, + }, + }, + }) + framework.ExpectNoError(err) + framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, podName, ns)) + pod, err := c.Pods(ns).Get(podName) + framework.ExpectNoError(err) + + nodeName := pod.Spec.NodeName + defer c.Pods(ns).Delete(podName, api.NewDeleteOptions(0)) + + By("Trying to apply a random label on the found node.") + k := "e2e.inter-pod-affinity.kubernetes.io/zone" + v := "kubernetes-e2e" + patch := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`, k, v) + err = c.Patch(api.MergePatchType).Resource("nodes").Name(nodeName).Body([]byte(patch)).Do().Error() + framework.ExpectNoError(err) + + node, err := c.Nodes().Get(nodeName) + framework.ExpectNoError(err) + Expect(node.Labels[k]).To(Equal(v)) + + By("Trying to launch the pod, now with multiple pod affinities with diff LabelOperators.") + labelPodName := "with-podaffinity-" + string(util.NewUUID()) + _, err = c.Pods(ns).Create(&api.Pod{ + TypeMeta: unversioned.TypeMeta{ + Kind: "Pod", + }, + ObjectMeta: api.ObjectMeta{ + Name: labelPodName, + Annotations: map[string]string{ + "scheduler.alpha.kubernetes.io/affinity": ` + {"podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector":{ + "matchExpressions": [{ + "key": "security", + "operator": "In", + "values": ["S1", "value2"] + }, + { + "key": "security", + "operator": "NotIn", + "values": ["S2"] + }, + { + "key": "security", + "operator":"Exists" + }] + }, + "topologyKey": "` + k + `" + }] + }}`, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: labelPodName, + Image: "gcr.io/google_containers/pause:2.0", + }, + }, + }, + }) + framework.ExpectNoError(err) + defer c.Pods(ns).Delete(labelPodName, api.NewDeleteOptions(0)) + + // check that pod got scheduled. We intentionally DO NOT check that the + // pod is running because this will create a race condition with the + // kubelet and the scheduler: the scheduler might have scheduled a pod + // already when the kubelet does not know about its new label yet. The + // kubelet will then refuse to launch the pod. + framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName)) + labelPod, err := c.Pods(ns).Get(labelPodName) + framework.ExpectNoError(err) + Expect(labelPod.Spec.NodeName).To(Equal(nodeName)) + }) + + // test the pod affinity and anti affinity successful matching scenario. + It("validates that InterPod Affinity and AntiAffinity is respected if matching", func() { + // launch a pod to find a node which can launch a pod. We intentionally do + // not just take the node list and choose the first of them. Depending on the + // cluster and the scheduler it might be that a "normal" pod cannot be + // scheduled onto it. 
+ By("Trying to launch a pod with a label to get a node which can launch it.") + podName := "with-label-" + string(util.NewUUID()) + _, err := c.Pods(ns).Create(&api.Pod{ + TypeMeta: unversioned.TypeMeta{ + Kind: "Pod", + }, + ObjectMeta: api.ObjectMeta{ + Name: podName, + Labels: map[string]string{"security": "S1"}, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: podName, + Image: "gcr.io/google_containers/pause:2.0", + }, + }, + }, + }) + framework.ExpectNoError(err) + framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, podName, ns)) + pod, err := c.Pods(ns).Get(podName) + framework.ExpectNoError(err) + + nodeName := pod.Spec.NodeName + defer c.Pods(ns).Delete(podName, api.NewDeleteOptions(0)) + + By("Trying to apply a random label on the found node.") + k := "e2e.inter-pod-affinity.kubernetes.io/zone" + v := "e2e-testing" + patch := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`, k, v) + err = c.Patch(api.MergePatchType).Resource("nodes").Name(nodeName).Body([]byte(patch)).Do().Error() + framework.ExpectNoError(err) + + node, err := c.Nodes().Get(nodeName) + framework.ExpectNoError(err) + Expect(node.Labels[k]).To(Equal(v)) + + By("Trying to launch the pod, now with Pod affinity and anti affinity.") + labelPodName := "with-podantiaffinity-" + string(util.NewUUID()) + _, err = c.Pods(ns).Create(&api.Pod{ + TypeMeta: unversioned.TypeMeta{ + Kind: "Pod", + }, + ObjectMeta: api.ObjectMeta{ + Name: labelPodName, + Annotations: map[string]string{ + "scheduler.alpha.kubernetes.io/affinity": ` + {"podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "security", + "operator": "In", + "values":["S1"] + }] + }, + "topologyKey": "` + k + `" + }] + }, + "podAntiAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "security", + "operator": "In", + "values":["S2"] + }] + }, + "topologyKey": "` + k + `" + }] + }}`, + }, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: labelPodName, + Image: "gcr.io/google_containers/pause:2.0", + }, + }, + }, + }) + framework.ExpectNoError(err) + defer c.Pods(ns).Delete(labelPodName, api.NewDeleteOptions(0)) + + // check that pod got scheduled. We intentionally DO NOT check that the + // pod is running because this will create a race condition with the + // kubelet and the scheduler: the scheduler might have scheduled a pod + // already when the kubelet does not know about its new label yet. The + // kubelet will then refuse to launch the pod. + framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName)) + labelPod, err := c.Pods(ns).Get(labelPodName) + framework.ExpectNoError(err) + Expect(labelPod.Spec.NodeName).To(Equal(nodeName)) + }) + + // Verify that an escaped JSON string of pod affinity and pod anti affinity in a YAML PodSpec works. + It("validates that embedding the JSON PodAffinity and PodAntiAffinity setting as a string in the annotation value work", func() { + // launch a pod to find a node which can launch a pod. We intentionally do + // not just take the node list and choose the first of them. Depending on the + // cluster and the scheduler it might be that a "normal" pod cannot be + // scheduled onto it. 
+ By("Trying to launch a pod with label to get a node which can launch it.") + podName := "with-label-" + string(util.NewUUID()) + _, err := c.Pods(ns).Create(&api.Pod{ + TypeMeta: unversioned.TypeMeta{ + Kind: "Pod", + }, + ObjectMeta: api.ObjectMeta{ + Name: podName, + Labels: map[string]string{"security": "S1"}, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: podName, + Image: "gcr.io/google_containers/pause:2.0", + }, + }, + }, + }) + framework.ExpectNoError(err) + framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, podName, ns)) + pod, err := c.Pods(ns).Get(podName) + framework.ExpectNoError(err) + + nodeName := pod.Spec.NodeName + defer c.Pods(ns).Delete(podName, api.NewDeleteOptions(0)) + + By("Trying to apply a label with fake az info on the found node.") + k := "e2e.inter-pod-affinity.kubernetes.io/zone" + v := "e2e-az1" + patch := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`, k, v) + err = c.Patch(api.MergePatchType).Resource("nodes").Name(nodeName).Body([]byte(patch)).Do().Error() + framework.ExpectNoError(err) + + node, err := c.Nodes().Get(nodeName) + framework.ExpectNoError(err) + Expect(node.Labels[k]).To(Equal(v)) + + By("Trying to launch a pod that with PodAffinity & PodAntiAffinity setting as embedded JSON string in the annotation value.") + labelPodName := "with-newlabels" + nodeSelectionRoot := filepath.Join(framework.TestContext.RepoRoot, "test/e2e/node-selection") + testPodPath := filepath.Join(nodeSelectionRoot, "pod-with-pod-affinity.yaml") + framework.RunKubectlOrDie("create", "-f", testPodPath, fmt.Sprintf("--namespace=%v", ns)) + defer c.Pods(ns).Delete(labelPodName, api.NewDeleteOptions(0)) + + // check that pod got scheduled. We intentionally DO NOT check that the + // pod is running because this will create a race condition with the + // kubelet and the scheduler: the scheduler might have scheduled a pod + // already when the kubelet does not know about its new label yet. The + // kubelet will then refuse to launch the pod. 
+ framework.ExpectNoError(framework.WaitForPodNotPending(c, ns, labelPodName)) + labelPod, err := c.Pods(ns).Get(labelPodName) + framework.ExpectNoError(err) + Expect(labelPod.Spec.NodeName).To(Equal(nodeName)) + }) }) diff --git a/test/integration/extender_test.go b/test/integration/extender_test.go index 28b153a30f44e..3a0946b1cb887 100644 --- a/test/integration/extender_test.go +++ b/test/integration/extender_test.go @@ -241,7 +241,7 @@ func TestSchedulerExtender(t *testing.T) { } policy.APIVersion = testapi.Default.GroupVersion().String() - schedulerConfigFactory := factory.NewConfigFactory(restClient, api.DefaultSchedulerName) + schedulerConfigFactory := factory.NewConfigFactory(restClient, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains) schedulerConfig, err := schedulerConfigFactory.CreateFromConfig(policy) if err != nil { t.Fatalf("Couldn't create scheduler config: %v", err) diff --git a/test/integration/scheduler_test.go b/test/integration/scheduler_test.go index b8a74a7dc3a09..d7cec598e299c 100644 --- a/test/integration/scheduler_test.go +++ b/test/integration/scheduler_test.go @@ -68,7 +68,7 @@ func TestUnschedulableNodes(t *testing.T) { restClient := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) - schedulerConfigFactory := factory.NewConfigFactory(restClient, api.DefaultSchedulerName) + schedulerConfigFactory := factory.NewConfigFactory(restClient, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains) schedulerConfig, err := schedulerConfigFactory.Create() if err != nil { t.Fatalf("Couldn't create scheduler config: %v", err) @@ -315,7 +315,7 @@ func TestMultiScheduler(t *testing.T) { // 1. create and start default-scheduler restClient := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) - schedulerConfigFactory := factory.NewConfigFactory(restClient, api.DefaultSchedulerName) + schedulerConfigFactory := factory.NewConfigFactory(restClient, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains) schedulerConfig, err := schedulerConfigFactory.Create() if err != nil { t.Fatalf("Couldn't create scheduler config: %v", err) @@ -386,7 +386,7 @@ func TestMultiScheduler(t *testing.T) { // 5. create and start a scheduler with name "foo-scheduler" restClient2 := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) - schedulerConfigFactory2 := factory.NewConfigFactory(restClient2, "foo-scheduler") + schedulerConfigFactory2 := factory.NewConfigFactory(restClient2, "foo-scheduler", api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains) schedulerConfig2, err := schedulerConfigFactory2.Create() if err != nil { t.Errorf("Couldn't create scheduler config: %v", err) @@ -484,7 +484,7 @@ func TestAllocatable(t *testing.T) { // 1. 
create and start default-scheduler restClient := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}) - schedulerConfigFactory := factory.NewConfigFactory(restClient, api.DefaultSchedulerName) + schedulerConfigFactory := factory.NewConfigFactory(restClient, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains) schedulerConfig, err := schedulerConfigFactory.Create() if err != nil { t.Fatalf("Couldn't create scheduler config: %v", err)