Merge pull request kubernetes#135 from vardhaman22/k8s-v1.27.14
[v1.27] Release v1.27.14
kinarashah authored May 24, 2024
2 parents 194cb8b + 8070b86 commit 638a019
Showing 33 changed files with 950 additions and 190 deletions.
347 changes: 241 additions & 106 deletions CHANGELOG/CHANGELOG-1.27.md

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion cluster/gce/config-default.sh
@@ -86,7 +86,7 @@ fi
# you are updating the os image versions, update this variable.
# Also please update corresponding image for node e2e at:
# https://github.com/kubernetes/kubernetes/blob/master/test/e2e_node/jenkins/image-config.yaml
-GCI_VERSION=${KUBE_GCI_VERSION:-cos-97-16919-103-16}
+GCI_VERSION=${KUBE_GCI_VERSION:-cos-109-17800-147-60}
export MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-}
export MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud}
export NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-${GCI_VERSION}}
2 changes: 1 addition & 1 deletion cluster/gce/config-test.sh
@@ -99,7 +99,7 @@ ALLOWED_NOTREADY_NODES=${ALLOWED_NOTREADY_NODES:-$(($(get-num-nodes) / 100))}
# you are updating the os image versions, update this variable.
# Also please update corresponding image for node e2e at:
# https://github.com/kubernetes/kubernetes/blob/master/test/e2e_node/jenkins/image-config.yaml
-GCI_VERSION=${KUBE_GCI_VERSION:-cos-97-16919-103-16}
+GCI_VERSION=${KUBE_GCI_VERSION:-cos-109-17800-147-60}
export MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-}
export MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud}
export NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-${GCI_VERSION}}
27 changes: 16 additions & 11 deletions pkg/scheduler/schedule_one.go
@@ -386,14 +386,17 @@ func (sched *Scheduler) schedulePod(ctx context.Context, fwk framework.Framework
// Filters the nodes to find the ones that fit the pod based on the framework
// filter plugins and filter extenders.
func (sched *Scheduler) findNodesThatFitPod(ctx context.Context, fwk framework.Framework, state *framework.CycleState, pod *v1.Pod) ([]*v1.Node, framework.Diagnosis, error) {
-diagnosis := framework.Diagnosis{
-NodeToStatusMap: make(framework.NodeToStatusMap),
-UnschedulablePlugins: sets.NewString(),
-}

allNodes, err := sched.nodeInfoSnapshot.NodeInfos().List()
if err != nil {
-return nil, diagnosis, err
+return nil, framework.Diagnosis{
+NodeToStatusMap: make(framework.NodeToStatusMap),
+UnschedulablePlugins: sets.NewString(),
+}, err
}

+diagnosis := framework.Diagnosis{
+NodeToStatusMap: make(framework.NodeToStatusMap, len(allNodes)),
+UnschedulablePlugins: sets.NewString(),
+}
// Run "prefilter" plugins.
preRes, s := fwk.RunPreFilterPlugins(ctx, state, pod)
@@ -434,12 +437,14 @@ func (sched *Scheduler) findNodesThatFitPod(ctx context.Context, fwk framework.F
nodes := allNodes
if !preRes.AllNodes() {
nodes = make([]*framework.NodeInfo, 0, len(preRes.NodeNames))
-for n := range preRes.NodeNames {
-nInfo, err := sched.nodeInfoSnapshot.NodeInfos().Get(n)
-if err != nil {
-return nil, diagnosis, err
+for _, n := range allNodes {
+if !preRes.NodeNames.Has(n.Node().Name) {
+// We consider Nodes that are filtered out by PreFilterResult as rejected via UnschedulableAndUnresolvable.
+// We have to record them in NodeToStatusMap so that they won't be considered as candidates in the preemption.
+diagnosis.NodeToStatusMap[n.Node().Name] = framework.NewStatus(framework.UnschedulableAndUnresolvable, "node is filtered out by the prefilter result")
+continue
}
-nodes = append(nodes, nInfo)
+nodes = append(nodes, n)
}
}
feasibleNodes, err := sched.findNodesThatPassFilters(ctx, fwk, state, pod, diagnosis, nodes)
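Why this change matters: previously, nodes excluded by a PreFilterResult were simply skipped, so the preemption path could still treat them as candidates; now each excluded node is recorded in NodeToStatusMap as UnschedulableAndUnresolvable. Below is a minimal, self-contained Go sketch of that pattern, using simplified stand-in types rather than the real scheduler framework API:

package main

import "fmt"

// Simplified stand-ins for the scheduler framework types (illustrative only).
type Node struct{ Name string }

type Status string

const UnschedulableAndUnresolvable Status = "UnschedulableAndUnresolvable"

// filterByPreFilterResult mirrors the pattern introduced above: keep only the nodes
// named by the PreFilter result, and record every excluded node in statusMap so a
// later preemption pass will not treat it as a candidate.
func filterByPreFilterResult(allNodes []*Node, preFiltered map[string]struct{}, statusMap map[string]Status) []*Node {
	nodes := make([]*Node, 0, len(preFiltered))
	for _, n := range allNodes {
		if _, ok := preFiltered[n.Name]; !ok {
			statusMap[n.Name] = UnschedulableAndUnresolvable
			continue
		}
		nodes = append(nodes, n)
	}
	return nodes
}

func main() {
	all := []*Node{{Name: "node1"}, {Name: "node2"}, {Name: "node3"}}
	pre := map[string]struct{}{"node2": {}}
	statuses := map[string]Status{}
	candidates := filterByPreFilterResult(all, pre, statuses)
	fmt.Println(len(candidates), statuses) // 1 map[node1:... node3:...]
}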
2 changes: 1 addition & 1 deletion pkg/scheduler/schedule_one_test.go
@@ -1965,7 +1965,7 @@ func TestSchedulerSchedulePod(t *testing.T) {
nodes: []string{"node1", "node2", "node3"},
pod: st.MakePod().Name("test-prefilter").UID("test-prefilter").Obj(),
wantNodes: sets.NewString("node2"),
-wantEvaluatedNodes: pointer.Int32(1),
+wantEvaluatedNodes: pointer.Int32(3),
},
{
name: "test prefilter plugin returning non-intersecting nodes",
14 changes: 14 additions & 0 deletions pkg/scheduler/testing/wrappers.go
@@ -845,6 +845,20 @@ func (p *PersistentVolumeWrapper) HostPathVolumeSource(src *v1.HostPathVolumeSou
return p
}

+// NodeAffinityIn creates a HARD node affinity (with the operator In)
+// and injects into the pv.
+func (p *PersistentVolumeWrapper) NodeAffinityIn(key string, vals []string) *PersistentVolumeWrapper {
+if p.Spec.NodeAffinity == nil {
+p.Spec.NodeAffinity = &v1.VolumeNodeAffinity{}
+}
+if p.Spec.NodeAffinity.Required == nil {
+p.Spec.NodeAffinity.Required = &v1.NodeSelector{}
+}
+nodeSelector := MakeNodeSelector().In(key, vals).Obj()
+p.Spec.NodeAffinity.Required.NodeSelectorTerms = append(p.Spec.NodeAffinity.Required.NodeSelectorTerms, nodeSelector.NodeSelectorTerms...)
+return p
+}

// ResourceClaimWrapper wraps a ResourceClaim inside.
type ResourceClaimWrapper struct{ resourcev1alpha2.ResourceClaim }

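A usage sketch for the new NodeAffinityIn wrapper (illustrative only; it assumes the package's existing MakePersistentVolume, Name, and Obj helpers and the conventional st import alias seen in the scheduler tests):

// Build a PV whose hard node affinity requires kubernetes.io/hostname in {node-1, node-2}.
pv := st.MakePersistentVolume().
	Name("pv-with-affinity").
	NodeAffinityIn("kubernetes.io/hostname", []string{"node-1", "node-2"}).
	Obj()
// pv.Spec.NodeAffinity.Required now holds a NodeSelectorTerm whose match expression
// uses the In operator with the key and values given above.
_ = pv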
7 changes: 6 additions & 1 deletion staging/src/k8s.io/cloud-provider/cloud.go
@@ -98,6 +98,8 @@ func DefaultLoadBalancerName(service *v1.Service) string {
}

// GetInstanceProviderID builds a ProviderID for a node in a cloud.
+// Note that if the instance does not exist, we must return ("", cloudprovider.InstanceNotFound)
+// cloudprovider.InstanceNotFound should NOT be returned for instances that exist but are stopped/sleeping
func GetInstanceProviderID(ctx context.Context, cloud Interface, nodeName types.NodeName) (string, error) {
instances, ok := cloud.Instances()
if !ok {
@@ -108,8 +110,11 @@ func GetInstanceProviderID(ctx context.Context, cloud Interface, nodeName types.
if err == NotImplemented {
return "", err
}
+if err == InstanceNotFound {
+return "", err
+}

-return "", fmt.Errorf("failed to get instance ID from cloud provider: %v", err)
+return "", fmt.Errorf("failed to get instance ID from cloud provider: %w", err)
}
return cloud.ProviderName() + "://" + instanceID, nil
}
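The new comment pins down the contract: a missing instance must surface as cloudprovider.InstanceNotFound, while an instance that merely stopped must not. A caller-side sketch of how that distinction is consumed (illustrative only; cloud is any cloudprovider.Interface implementation, and the example providerID format is just the GCE convention):

providerID, err := cloudprovider.GetInstanceProviderID(ctx, cloud, types.NodeName("node-1"))
switch {
case err == cloudprovider.InstanceNotFound:
	// The backing VM no longer exists; callers such as the node lifecycle
	// controller can treat the Node object as orphaned.
case err != nil:
	// Other failures are wrapped with %w, so the underlying provider error
	// can still be inspected with errors.Is / errors.As.
default:
	_ = providerID // e.g. "gce://my-project/us-central1-a/node-1"
}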
staging/src/k8s.io/cloud-provider/controllers/nodelifecycle/node_lifecycle_controller.go
@@ -152,7 +152,7 @@ func (c *CloudNodeLifecycleController) MonitorNodes(ctx context.Context) {

// At this point the node has NotReady status, we need to check if the node has been removed
// from the cloud provider. If node cannot be found in cloudprovider, then delete the node
-exists, err := ensureNodeExistsByProviderID(ctx, c.cloud, node)
+exists, err := c.ensureNodeExistsByProviderID(ctx, node)
if err != nil {
klog.Errorf("error checking if node %s exists: %v", node.Name, err)
continue
@@ -180,7 +180,7 @@ func (c *CloudNodeLifecycleController) MonitorNodes(ctx context.Context) {
// Node exists. We need to check this to get taint working in similar in all cloudproviders
// current problem is that shutdown nodes are not working in similar way ie. all cloudproviders
// does not delete node from kubernetes cluster when instance it is shutdown see issue #46442
-shutdown, err := shutdownInCloudProvider(ctx, c.cloud, node)
+shutdown, err := c.shutdownInCloudProvider(ctx, node)
if err != nil {
klog.Errorf("error checking if node %s is shutdown: %v", node.Name, err)
}
@@ -196,18 +196,49 @@ func (c *CloudNodeLifecycleController) MonitorNodes(ctx context.Context) {
}
}

+// getProviderID returns the provider ID for the node. If Node CR has no provider ID,
+// it will be the one from the cloud provider.
+func (c *CloudNodeLifecycleController) getProviderID(ctx context.Context, node *v1.Node) (string, error) {
+if node.Spec.ProviderID != "" {
+return node.Spec.ProviderID, nil
+}
+
+if instanceV2, ok := c.cloud.InstancesV2(); ok {
+metadata, err := instanceV2.InstanceMetadata(ctx, node)
+if err != nil {
+return "", err
+}
+return metadata.ProviderID, nil
+}
+
+providerID, err := cloudprovider.GetInstanceProviderID(ctx, c.cloud, types.NodeName(node.Name))
+if err != nil {
+return "", err
+}
+
+return providerID, nil
+}

// shutdownInCloudProvider returns true if the node is shutdown on the cloud provider
-func shutdownInCloudProvider(ctx context.Context, cloud cloudprovider.Interface, node *v1.Node) (bool, error) {
-if instanceV2, ok := cloud.InstancesV2(); ok {
+func (c *CloudNodeLifecycleController) shutdownInCloudProvider(ctx context.Context, node *v1.Node) (bool, error) {
+if instanceV2, ok := c.cloud.InstancesV2(); ok {
return instanceV2.InstanceShutdown(ctx, node)
}

-instances, ok := cloud.Instances()
+instances, ok := c.cloud.Instances()
if !ok {
return false, errors.New("cloud provider does not support instances")
}

-shutdown, err := instances.InstanceShutdownByProviderID(ctx, node.Spec.ProviderID)
+providerID, err := c.getProviderID(ctx, node)
+if err != nil {
+if err == cloudprovider.InstanceNotFound {
+return false, nil
+}
+return false, err
+}
+
+shutdown, err := instances.InstanceShutdownByProviderID(ctx, providerID)
if err == cloudprovider.NotImplemented {
return false, nil
}
@@ -216,32 +247,22 @@ func shutdownInCloudProvider(ctx context.Context, cloud cloudprovider.Interface,
}

// ensureNodeExistsByProviderID checks if the instance exists by the provider id,
-// If provider id in spec is empty it calls instanceId with node name to get provider id
-func ensureNodeExistsByProviderID(ctx context.Context, cloud cloudprovider.Interface, node *v1.Node) (bool, error) {
-if instanceV2, ok := cloud.InstancesV2(); ok {
+func (c *CloudNodeLifecycleController) ensureNodeExistsByProviderID(ctx context.Context, node *v1.Node) (bool, error) {
+if instanceV2, ok := c.cloud.InstancesV2(); ok {
return instanceV2.InstanceExists(ctx, node)
}

-instances, ok := cloud.Instances()
+instances, ok := c.cloud.Instances()
if !ok {
return false, errors.New("instances interface not supported in the cloud provider")
}

-providerID := node.Spec.ProviderID
-if providerID == "" {
-var err error
-providerID, err = instances.InstanceID(ctx, types.NodeName(node.Name))
-if err != nil {
-if err == cloudprovider.InstanceNotFound {
-return false, nil
-}
-return false, err
-}
-
-if providerID == "" {
-klog.Warningf("Cannot find valid providerID for node name %q, assuming non existence", node.Name)
+providerID, err := c.getProviderID(ctx, node)
+if err != nil {
+if err == cloudprovider.InstanceNotFound {
+return false, nil
+}
+return false, err
+}

return instances.InstanceExistsByProviderID(ctx, providerID)
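For completeness, a hypothetical provider-side sketch of the contract the refactored helpers rely on: a deleted VM is reported as (false, nil) from InstanceExistsByProviderID (or as cloudprovider.InstanceNotFound from the ID lookups), so ensureNodeExistsByProviderID returns false and the controller deletes the Node. The myProvider type and the client, parseID, and isNotFound helpers are assumptions for illustration, not part of this commit or any real provider:

func (p *myProvider) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) {
	vm, err := p.client.GetVM(ctx, parseID(providerID)) // hypothetical cloud API call
	if isNotFound(err) {
		return false, nil // VM is gone: the lifecycle controller may delete the Node
	}
	if err != nil {
		return false, err // transient failures must not be reported as "not found"
	}
	_ = vm
	return true, nil
}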
(Remaining changed files are not shown.)
