Manually revert kubernetes#76976
zhan849 authored and superlime committed Jun 3, 2019
1 parent e8aa989 commit 4d00f07
Showing 3 changed files with 50 additions and 182 deletions.
1 change: 0 additions & 1 deletion staging/src/k8s.io/legacy-cloud-providers/aws/BUILD
@@ -82,7 +82,6 @@ go_test(
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
-        "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
         "//staging/src/k8s.io/cloud-provider/volume:go_default_library",
         "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library",
         "//vendor/github.com/aws/aws-sdk-go/service/ec2:go_default_library",
118 changes: 50 additions & 68 deletions staging/src/k8s.io/legacy-cloud-providers/aws/aws.go
@@ -205,16 +205,6 @@ const volumeAttachmentStuck = "VolumeAttachmentStuck"
 // Indicates that a node has volumes stuck in attaching state and hence it is not fit for scheduling more pods
 const nodeWithImpairedVolumes = "NodeWithImpairedVolumes"
 
-const (
-	// These constants help to identify if a node is a master or a minion
-	labelKeyNodeRole    = "kubernetes.io/role"
-	nodeMasterRole      = "master"
-	nodeMinionRole      = "node"
-	labelKeyNodeMaster  = "node-role.kubernetes.io/master"
-	labelKeyNodeCompute = "node-role.kubernetes.io/compute"
-	labelKeyNodeMinion  = "node-role.kubernetes.io/node"
-)
-
 const (
 	// volumeAttachmentConsecutiveErrorLimit is the number of consecutive errors we will ignore when waiting for a volume to attach/detach
 	volumeAttachmentStatusConsecutiveErrorLimit = 10

@@ -1628,77 +1618,69 @@ func (c *Cloud) InstanceType(ctx context.Context, nodeName types.NodeName) (stri
 // GetCandidateZonesForDynamicVolume retrieves a list of all the zones in which nodes are running
 // It currently involves querying all instances
 func (c *Cloud) GetCandidateZonesForDynamicVolume() (sets.String, error) {
-	zones := sets.NewString()
+	// We don't currently cache this; it is currently used only in volume
+	// creation which is expected to be a comparatively rare occurrence.
+
+	// TODO: Caching / expose v1.Nodes to the cloud provider?
+	// TODO: We could also query for subnets, I think
+
+	// Note: It is more efficient to call the EC2 API twice with different tag
+	// filters than to call it once with a tag filter that results in a logical
+	// OR. For really large clusters the logical OR will result in EC2 API rate
+	// limiting.
+	instances := []*ec2.Instance{}
 
-	// TODO: list from cache?
-	nodes, err := c.kubeClient.CoreV1().Nodes().List(metav1.ListOptions{})
+	baseFilters := []*ec2.Filter{newEc2Filter("instance-state-name", "running")}
+
+	filters := c.tagging.addFilters(baseFilters)
+	di, err := c.describeInstances(filters)
 	if err != nil {
-		klog.Errorf("Failed to get nodes from api server: %#v", err)
 		return nil, err
 	}
 
-	for _, n := range nodes.Items {
-		if !c.isNodeReady(&n) {
-			klog.V(4).Infof("Ignoring not ready node %q in zone discovery", n.Name)
-			continue
-		}
-		// In some cluster provisioning software, a node can be both a minion and a master. Therefore we white-list
-		// here, and only filter out node that is not minion AND is labeled as master explicitly
-		if c.isMinionNode(&n) || !c.isMasterNode(&n) {
-			if zone, ok := n.Labels[v1.LabelZoneFailureDomain]; ok {
-				zones.Insert(zone)
-			} else {
-				klog.Warningf("Node %s does not have zone label, ignore for zone discovery.", n.Name)
-			}
-		} else {
-			klog.V(4).Infof("Ignoring master node %q in zone discovery", n.Name)
+	instances = append(instances, di...)
+
+	if c.tagging.usesLegacyTags {
+		filters = c.tagging.addLegacyFilters(baseFilters)
+		di, err = c.describeInstances(filters)
+		if err != nil {
+			return nil, err
 		}
+
+		instances = append(instances, di...)
 	}
 
-	klog.V(2).Infof("Found instances in zones %s", zones)
-	return zones, nil
-}
+	if len(instances) == 0 {
+		return nil, fmt.Errorf("no instances returned")
+	}
+
+	zones := sets.NewString()
 
-// isNodeReady checks node condition and return true if NodeReady is marked as true
-func (c *Cloud) isNodeReady(node *v1.Node) bool {
-	for _, c := range node.Status.Conditions {
-		if c.Type == v1.NodeReady {
-			return c.Status == v1.ConditionTrue
+	for _, instance := range instances {
+		// We skip over master nodes, if the installation tool labels them with one of the well-known master labels
+		// This avoids creating a volume in a zone where only the master is running - e.g. #34583
+		// This is a short-term workaround until the scheduler takes care of zone selection
+		master := false
+		for _, tag := range instance.Tags {
+			tagKey := aws.StringValue(tag.Key)
+			if awsTagNameMasterRoles.Has(tagKey) {
+				master = true
+			}
 		}
-	}
-	return false
-}
 
-// isMasterNode checks if the node is labeled as master
-func (c *Cloud) isMasterNode(node *v1.Node) bool {
-	// Master node has one or more of the following labels:
-	//
-	//	kubernetes.io/role: master
-	//	node-role.kubernetes.io/master: ""
-	//	node-role.kubernetes.io/master: "true"
-	if val, ok := node.Labels[labelKeyNodeMaster]; ok && val != "false" {
-		return true
-	} else if role, ok := node.Labels[labelKeyNodeRole]; ok && role == nodeMasterRole {
-		return true
-	}
-	return false
-}
+		if master {
+			klog.V(4).Infof("Ignoring master instance %q in zone discovery", aws.StringValue(instance.InstanceId))
+			continue
+		}
 
-// isMinionNode checks if the node is labeled as minion
-func (c *Cloud) isMinionNode(node *v1.Node) bool {
-	// Minion node has one or more of the following labels:
-	//
-	//	kubernetes.io/role: "node"
-	//	node-role.kubernetes.io/compute: "true"
-	//	node-role.kubernetes.io/node: ""
-	if val, ok := node.Labels[labelKeyNodeMinion]; ok && val != "false" {
-		return true
-	} else if val, ok := node.Labels[labelKeyNodeCompute]; ok && val != "false" {
-		return true
-	} else if role, ok := node.Labels[labelKeyNodeRole]; ok && role == nodeMinionRole {
-		return true
+		if instance.Placement != nil {
+			zone := aws.StringValue(instance.Placement.AvailabilityZone)
+			zones.Insert(zone)
+		}
 	}
-	return false
+
+	klog.V(2).Infof("Found instances in zones %s", zones)
+	return zones, nil
 }

 // GetZone implements Zones.GetZone
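The revert trades the API-server round trip for direct EC2 queries, using the two-pass pattern the restored comment describes: one DescribeInstances call per cluster-tag scheme, merged client-side, instead of a single tag filter expressing a logical OR. Below is a minimal, self-contained sketch of that pattern in plain aws-sdk-go; it is not the provider code itself, and the two cluster tag keys are illustrative stand-ins for whatever c.tagging would supply.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// describeRunningByTag lists running instances that carry the given tag key.
// Querying one tag key per call avoids the logical-OR filter that, per the
// comment in the diff above, can trigger EC2 API rate limiting on large clusters.
func describeRunningByTag(client *ec2.EC2, tagKey string) ([]*ec2.Instance, error) {
	input := &ec2.DescribeInstancesInput{
		Filters: []*ec2.Filter{
			{Name: aws.String("instance-state-name"), Values: []*string{aws.String("running")}},
			{Name: aws.String("tag-key"), Values: []*string{aws.String(tagKey)}},
		},
	}
	var instances []*ec2.Instance
	// DescribeInstancesPages follows pagination tokens transparently.
	err := client.DescribeInstancesPages(input, func(page *ec2.DescribeInstancesOutput, lastPage bool) bool {
		for _, reservation := range page.Reservations {
			instances = append(instances, reservation.Instances...)
		}
		return true
	})
	return instances, err
}

func main() {
	client := ec2.New(session.Must(session.NewSession()))

	// Hypothetical tag keys standing in for the provider's current and legacy
	// cluster tags; the real filters are built by c.tagging.addFilters and
	// c.tagging.addLegacyFilters in the function above.
	zones := map[string]struct{}{}
	for _, tagKey := range []string{"kubernetes.io/cluster/example", "KubernetesCluster"} {
		instances, err := describeRunningByTag(client, tagKey)
		if err != nil {
			panic(err)
		}
		for _, instance := range instances {
			if instance.Placement != nil {
				zones[aws.StringValue(instance.Placement.AvailabilityZone)] = struct{}{}
			}
		}
	}
	fmt.Println("candidate zones:", zones)
}

Merging results from two cheap, narrowly filtered calls keeps each request small and cacheable on the EC2 side, which is why the restored comment prefers it to one broad OR filter for large clusters.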
113 changes: 0 additions & 113 deletions staging/src/k8s.io/legacy-cloud-providers/aws/aws_test.go
@@ -36,7 +36,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/sets"
-	"k8s.io/client-go/kubernetes/fake"
 	cloudvolume "k8s.io/cloud-provider/volume"
 )

@@ -1876,118 +1875,6 @@ func TestRegionIsValid(t *testing.T) {
 	assert.False(t, isRegionValid("pl-fake-991a", fake.metadata), "expected region 'pl-fake-991' to be invalid but it was not")
 }

-func TestGetCandidateZonesForDynamicVolume(t *testing.T) {
-	tests := []struct {
-		name          string
-		labels        map[string]string
-		ready         bool
-		expectedZones sets.String
-	}{
-		{
-			name:          "master node with role label",
-			labels:        map[string]string{labelKeyNodeRole: nodeMasterRole, v1.LabelZoneFailureDomain: "us-east-1a"},
-			ready:         true,
-			expectedZones: sets.NewString(),
-		},
-		{
-			name:          "master node with master label empty",
-			labels:        map[string]string{labelKeyNodeMaster: "", v1.LabelZoneFailureDomain: "us-east-1a"},
-			ready:         true,
-			expectedZones: sets.NewString(),
-		},
-		{
-			name:          "master node with master label true",
-			labels:        map[string]string{labelKeyNodeMaster: "true", v1.LabelZoneFailureDomain: "us-east-1a"},
-			ready:         true,
-			expectedZones: sets.NewString(),
-		},
-		{
-			name:          "master node with master label false",
-			labels:        map[string]string{labelKeyNodeMaster: "false", v1.LabelZoneFailureDomain: "us-east-1a"},
-			ready:         true,
-			expectedZones: sets.NewString("us-east-1a"),
-		},
-		{
-			name:          "minion node with role label",
-			labels:        map[string]string{labelKeyNodeRole: nodeMinionRole, v1.LabelZoneFailureDomain: "us-east-1a"},
-			ready:         true,
-			expectedZones: sets.NewString("us-east-1a"),
-		},
-		{
-			name:          "minion node with minion label",
-			labels:        map[string]string{labelKeyNodeMinion: "", v1.LabelZoneFailureDomain: "us-east-1a"},
-			ready:         true,
-			expectedZones: sets.NewString("us-east-1a"),
-		},
-		{
-			name:          "minion node with compute label",
-			labels:        map[string]string{labelKeyNodeCompute: "true", v1.LabelZoneFailureDomain: "us-east-1a"},
-			ready:         true,
-			expectedZones: sets.NewString("us-east-1a"),
-		},
-		{
-			name:          "master and minion node",
-			labels:        map[string]string{labelKeyNodeMaster: "true", labelKeyNodeCompute: "true", v1.LabelZoneFailureDomain: "us-east-1a"},
-			ready:         true,
-			expectedZones: sets.NewString("us-east-1a"),
-		},
-		{
-			name:          "node not ready",
-			labels:        map[string]string{v1.LabelZoneFailureDomain: "us-east-1a"},
-			ready:         false,
-			expectedZones: sets.NewString(),
-		},
-		{
-			name:          "node has no zone",
-			labels:        map[string]string{},
-			ready:         true,
-			expectedZones: sets.NewString(),
-		},
-		{
-			name:          "node with no label",
-			labels:        map[string]string{v1.LabelZoneFailureDomain: "us-east-1a"},
-			ready:         true,
-			expectedZones: sets.NewString("us-east-1a"),
-		},
-	}
-
-	for i, test := range tests {
-		t.Run(test.name, func(t *testing.T) {
-			awsServices := newMockedFakeAWSServices(TestClusterID)
-			c, _ := newAWSCloud(CloudConfig{}, awsServices)
-			c.kubeClient = fake.NewSimpleClientset()
-			nodeName := fmt.Sprintf("node-%d", i)
-			_, err := c.kubeClient.CoreV1().Nodes().Create(&v1.Node{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:   nodeName,
-					Labels: test.labels,
-				},
-				Status: genNodeStatus(test.ready),
-			})
-			assert.Nil(t, err)
-			zones, err := c.GetCandidateZonesForDynamicVolume()
-			assert.Nil(t, err)
-			assert.Equal(t, test.expectedZones, zones)
-		})
-	}
-}
-
-func genNodeStatus(ready bool) v1.NodeStatus {
-	status := v1.ConditionFalse
-	if ready {
-		status = v1.ConditionTrue
-	}
-	ret := v1.NodeStatus{
-		Conditions: []v1.NodeCondition{
-			{
-				Type:   v1.NodeReady,
-				Status: status,
-			},
-		},
-	}
-	return ret
-}

 func newMockedFakeAWSServices(id string) *FakeAWSServices {
 	s := NewFakeAWSServices(id)
 	s.ec2 = &MockedFakeEC2{FakeEC2Impl: s.ec2.(*FakeEC2Impl)}
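With the fake clientset gone from the test dependencies, a regression test for the reverted function would have to drive the mocked EC2 layer rather than node objects. A hypothetical sketch under that assumption follows; it presumes FakeAWSServices exposes a WithAz-style helper that seeds the fake self-instance into a zone and that the mocked DescribeInstances returns it, neither of which is shown in this diff.

func TestGetCandidateZonesFromInstances(t *testing.T) {
	// WithAz is assumed to place the fake instance in the requested
	// availability zone; the names mirror this file's fakes, but the
	// wiring here is illustrative rather than verified.
	awsServices := newMockedFakeAWSServices(TestClusterID).WithAz("us-east-1a")
	c, err := newAWSCloud(CloudConfig{}, awsServices)
	assert.Nil(t, err)

	zones, err := c.GetCandidateZonesForDynamicVolume()
	assert.Nil(t, err)
	assert.Equal(t, sets.NewString("us-east-1a"), zones)
}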
