Merge remote-tracking branch 'origin/master' into release-1.15
Bubblemelon committed Jun 5, 2019
2 parents b3d6f45 + 0d1007f commit bfa3755
Showing 21 changed files with 453 additions and 138 deletions.
2 changes: 1 addition & 1 deletion build/lib/release.sh
@@ -115,7 +115,7 @@ function kube::release::package_src_tarball() {
\) -prune \
\))
)
"${TAR}" czf "${src_tarball}" -C "${KUBE_ROOT}" "${source_files[@]}"
"${TAR}" czf "${src_tarball}" --transform 's|^\.|kubernetes|' -C "${KUBE_ROOT}" "${source_files[@]}"
fi
}

4 changes: 4 additions & 0 deletions cluster/gce/util.sh
@@ -869,6 +869,10 @@ function construct-windows-kubelet-flags {
# Turn off kernel memory cgroup notification.
flags+=" --experimental-kernel-memcg-notification=false"

# TODO(#78628): Re-enable KubeletPodResources when the issue is fixed.
# Force disable KubeletPodResources feature on Windows until #78628 is fixed.
flags+=" --feature-gates=KubeletPodResources=false"

KUBELET_ARGS="${flags}"
}

6 changes: 6 additions & 0 deletions cmd/kubeadm/app/cmd/options/constant.go
@@ -141,4 +141,10 @@ const (

// ForceReset flag instruct kubeadm to reset the node without prompting for confirmation
ForceReset = "force"

// CertificateRenewal flag instruct kubeadm to execute certificate renewal during upgrades
CertificateRenewal = "certificate-renewal"

// EtcdUpgrade flag instruct kubeadm to execute etcd upgrade during upgrades
EtcdUpgrade = "etcd-upgrade"
)
2 changes: 2 additions & 0 deletions cmd/kubeadm/app/cmd/phases/upgrade/node/controlplane.go
@@ -37,6 +37,8 @@ func NewControlPlane() workflow.Phase {
InheritFlags: []string{
options.DryRun,
options.KubeconfigPath,
options.CertificateRenewal,
options.EtcdUpgrade,
},
}
return phase
6 changes: 4 additions & 2 deletions cmd/kubeadm/app/cmd/upgrade/node.go
@@ -116,6 +116,8 @@ func addUpgradeNodeFlags(flagSet *flag.FlagSet, nodeOptions *nodeOptions) {
options.AddKubeConfigFlag(flagSet, &nodeOptions.kubeConfigPath)
flagSet.BoolVar(&nodeOptions.dryRun, options.DryRun, nodeOptions.dryRun, "Do not change any state, just output the actions that would be performed.")
flagSet.StringVar(&nodeOptions.kubeletVersion, options.KubeletVersion, nodeOptions.kubeletVersion, "The *desired* version for the kubelet config after the upgrade. If not specified, the KubernetesVersion from the kubeadm-config ConfigMap will be used")
flagSet.BoolVar(&nodeOptions.renewCerts, options.CertificateRenewal, nodeOptions.renewCerts, "Perform the renewal of certificates used by component changed during upgrades.")
flagSet.BoolVar(&nodeOptions.etcdUpgrade, options.EtcdUpgrade, nodeOptions.etcdUpgrade, "Perform the upgrade of etcd.")
}

// newNodeData returns a new nodeData struct to be used for the execution of the kubeadm upgrade node workflow.
@@ -247,8 +249,8 @@ func NewCmdUpgradeControlPlane() *cobra.Command {
// adds flags to the node command
options.AddKubeConfigFlag(cmd.Flags(), &nodeOptions.kubeConfigPath)
cmd.Flags().BoolVar(&nodeOptions.dryRun, options.DryRun, nodeOptions.dryRun, "Do not change any state, just output the actions that would be performed.")
cmd.Flags().BoolVar(&nodeOptions.etcdUpgrade, "etcd-upgrade", nodeOptions.etcdUpgrade, "Perform the upgrade of etcd.")
cmd.Flags().BoolVar(&nodeOptions.renewCerts, "certificate-renewal", nodeOptions.renewCerts, "Perform the renewal of certificates used by component changed during upgrades.")
cmd.Flags().BoolVar(&nodeOptions.etcdUpgrade, options.EtcdUpgrade, nodeOptions.etcdUpgrade, "Perform the upgrade of etcd.")
cmd.Flags().BoolVar(&nodeOptions.renewCerts, options.CertificateRenewal, nodeOptions.renewCerts, "Perform the renewal of certificates used by component changed during upgrades.")

// initialize the workflow runner with the list of phases
nodeRunner.AppendPhase(phases.NewControlPlane())
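The switch from the bare string literals to options.EtcdUpgrade and options.CertificateRenewal gives the upgrade-node command flags and the control-plane phase's InheritFlags list a single source of truth for the flag names. A minimal, self-contained sketch of that pattern, using hypothetical constants and an illustrative cobra command rather than kubeadm's actual types:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// Shared flag-name constants, in the spirit of cmd/kubeadm/app/cmd/options/constant.go.
const (
	EtcdUpgrade        = "etcd-upgrade"
	CertificateRenewal = "certificate-renewal"
)

type nodeOptions struct {
	etcdUpgrade bool
	renewCerts  bool
}

func main() {
	opts := &nodeOptions{etcdUpgrade: true, renewCerts: true}

	cmd := &cobra.Command{
		Use: "node",
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Printf("etcd-upgrade=%v certificate-renewal=%v\n", opts.etcdUpgrade, opts.renewCerts)
		},
	}

	// The command and any sub-phase reference the same constants,
	// so the flag names cannot drift apart.
	cmd.Flags().BoolVar(&opts.etcdUpgrade, EtcdUpgrade, opts.etcdUpgrade, "Perform the upgrade of etcd.")
	cmd.Flags().BoolVar(&opts.renewCerts, CertificateRenewal, opts.renewCerts, "Renew certificates during the upgrade.")

	inheritFlags := []string{EtcdUpgrade, CertificateRenewal} // what a phase would list in InheritFlags
	_ = inheritFlags

	_ = cmd.Execute()
}
```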
138 changes: 79 additions & 59 deletions pkg/controller/podautoscaler/horizontal.go
@@ -23,7 +23,7 @@ import (

autoscalingv1 "k8s.io/api/autoscaling/v1"
autoscalingv2 "k8s.io/api/autoscaling/v2beta2"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
apimeta "k8s.io/apimachinery/pkg/api/meta"
@@ -230,7 +230,7 @@ func (a *HorizontalController) processNextWorkItem() bool {
}

// computeReplicasForMetrics computes the desired number of replicas for the metric specifications listed in the HPA,
// returning the maximum of the computed replica counts, a description of the associated metric, and the statuses of
// returning the maximum of the computed replica counts, a description of the associated metric, and the statuses of
// all metrics computed.
func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.HorizontalPodAutoscaler, scale *autoscalingv1.Scale,
metricSpecs []autoscalingv2.MetricSpec) (replicas int32, metric string, statuses []autoscalingv2.MetricStatus, timestamp time.Time, err error) {
@@ -239,76 +239,96 @@ func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.Hori

statuses = make([]autoscalingv2.MetricStatus, len(metricSpecs))

if scale.Status.Selector == "" {
errMsg := "selector is required"
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "SelectorRequired", errMsg)
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidSelector", "the HPA target's scale is missing a selector")
return 0, "", nil, time.Time{}, fmt.Errorf(errMsg)
}

selector, err := labels.Parse(scale.Status.Selector)
if err != nil {
errMsg := fmt.Sprintf("couldn't convert selector into a corresponding internal selector object: %v", err)
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "InvalidSelector", errMsg)
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidSelector", errMsg)
return 0, "", nil, time.Time{}, fmt.Errorf(errMsg)
}

invalidMetricsCount := 0
var invalidMetricError error

for i, metricSpec := range metricSpecs {
if scale.Status.Selector == "" {
errMsg := "selector is required"
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "SelectorRequired", errMsg)
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidSelector", "the HPA target's scale is missing a selector")
return 0, "", nil, time.Time{}, fmt.Errorf(errMsg)
}
replicaCountProposal, metricNameProposal, timestampProposal, err := a.computeReplicasForMetric(hpa, metricSpec, currentReplicas, selector, &statuses[i])

selector, err := labels.Parse(scale.Status.Selector)
if err != nil {
errMsg := fmt.Sprintf("couldn't convert selector into a corresponding internal selector object: %v", err)
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "InvalidSelector", errMsg)
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidSelector", errMsg)
return 0, "", nil, time.Time{}, fmt.Errorf(errMsg)
}

var replicaCountProposal int32
var timestampProposal time.Time
var metricNameProposal string

switch metricSpec.Type {
case autoscalingv2.ObjectMetricSourceType:
metricSelector, err := metav1.LabelSelectorAsSelector(metricSpec.Object.Metric.Selector)
if err != nil {
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetObjectMetric", err.Error())
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetObjectMetric", "the HPA was unable to compute the replica count: %v", err)
return 0, "", nil, time.Time{}, fmt.Errorf("failed to get object metric value: %v", err)
}
replicaCountProposal, timestampProposal, metricNameProposal, err = a.computeStatusForObjectMetric(currentReplicas, metricSpec, hpa, selector, &statuses[i], metricSelector)
if err != nil {
return 0, "", nil, time.Time{}, fmt.Errorf("failed to get object metric value: %v", err)
}
case autoscalingv2.PodsMetricSourceType:
metricSelector, err := metav1.LabelSelectorAsSelector(metricSpec.Pods.Metric.Selector)
if err != nil {
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetPodsMetric", err.Error())
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetPodsMetric", "the HPA was unable to compute the replica count: %v", err)
return 0, "", nil, time.Time{}, fmt.Errorf("failed to get pods metric value: %v", err)
}
replicaCountProposal, timestampProposal, metricNameProposal, err = a.computeStatusForPodsMetric(currentReplicas, metricSpec, hpa, selector, &statuses[i], metricSelector)
if err != nil {
return 0, "", nil, time.Time{}, fmt.Errorf("failed to get object metric value: %v", err)
}
case autoscalingv2.ResourceMetricSourceType:
replicaCountProposal, timestampProposal, metricNameProposal, err = a.computeStatusForResourceMetric(currentReplicas, metricSpec, hpa, selector, &statuses[i])
if err != nil {
return 0, "", nil, time.Time{}, err
}
case autoscalingv2.ExternalMetricSourceType:
replicaCountProposal, timestampProposal, metricNameProposal, err = a.computeStatusForExternalMetric(currentReplicas, metricSpec, hpa, selector, &statuses[i])
if err != nil {
return 0, "", nil, time.Time{}, err
}
default:
errMsg := fmt.Sprintf("unknown metric source type %q", string(metricSpec.Type))
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "InvalidMetricSourceType", errMsg)
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidMetricSourceType", "the HPA was unable to compute the replica count: %s", errMsg)
return 0, "", nil, time.Time{}, fmt.Errorf(errMsg)
invalidMetricsCount++
invalidMetricError = err
}
if replicas == 0 || replicaCountProposal > replicas {
if err == nil && (replicas == 0 || replicaCountProposal > replicas) {
timestamp = timestampProposal
replicas = replicaCountProposal
metric = metricNameProposal
}
}

setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionTrue, "ValidMetricFound", "the HPA was able to successfully calculate a replica count from %s", metric)
// If all metrics are invalid or some are invalid and we would scale down,
// return an error and set the condition of the hpa based on the first invalid metric.
// Otherwise set the condition as scaling active as we're going to scale
if invalidMetricsCount >= len(metricSpecs) || (invalidMetricsCount > 0 && replicas < currentReplicas) {
return 0, "", statuses, time.Time{}, fmt.Errorf("Invalid metrics (%v invalid out of %v), last error was: %v", invalidMetricsCount, len(metricSpecs), invalidMetricError)
} else {
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionTrue, "ValidMetricFound", "the HPA was able to successfully calculate a replica count from %v", metric)
}
return replicas, metric, statuses, timestamp, nil
}

// computeReplicasForMetric computes the desired number of replicas for for a specific hpa and single metric specification.
func (a *HorizontalController) computeReplicasForMetric(hpa *autoscalingv2.HorizontalPodAutoscaler, spec autoscalingv2.MetricSpec,
currentReplicas int32, selector labels.Selector, status *autoscalingv2.MetricStatus) (replicaCountProposal int32, metricNameProposal string,
timestampProposal time.Time, err error) {

switch spec.Type {
case autoscalingv2.ObjectMetricSourceType:
metricSelector, err := metav1.LabelSelectorAsSelector(spec.Object.Metric.Selector)
if err != nil {
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetObjectMetric", err.Error())
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetObjectMetric", "the HPA was unable to compute the replica count: %v", err)
return 0, "", time.Time{}, fmt.Errorf("failed to get object metric value: %v", err)
}
replicaCountProposal, timestampProposal, metricNameProposal, err = a.computeStatusForObjectMetric(currentReplicas, spec, hpa, selector, status, metricSelector)
if err != nil {
return 0, "", time.Time{}, fmt.Errorf("failed to get object metric value: %v", err)
}
case autoscalingv2.PodsMetricSourceType:
metricSelector, err := metav1.LabelSelectorAsSelector(spec.Pods.Metric.Selector)
if err != nil {
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetPodsMetric", err.Error())
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetPodsMetric", "the HPA was unable to compute the replica count: %v", err)
return 0, "", time.Time{}, fmt.Errorf("failed to get pods metric value: %v", err)
}
replicaCountProposal, timestampProposal, metricNameProposal, err = a.computeStatusForPodsMetric(currentReplicas, spec, hpa, selector, status, metricSelector)
if err != nil {
return 0, "", time.Time{}, fmt.Errorf("failed to get object metric value: %v", err)
}
case autoscalingv2.ResourceMetricSourceType:
replicaCountProposal, timestampProposal, metricNameProposal, err = a.computeStatusForResourceMetric(currentReplicas, spec, hpa, selector, status)
if err != nil {
return 0, "", time.Time{}, err
}
case autoscalingv2.ExternalMetricSourceType:
replicaCountProposal, timestampProposal, metricNameProposal, err = a.computeStatusForExternalMetric(currentReplicas, spec, hpa, selector, status)
if err != nil {
return 0, "", time.Time{}, err
}
default:
errMsg := fmt.Sprintf("unknown metric source type %q", string(spec.Type))
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "InvalidMetricSourceType", errMsg)
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidMetricSourceType", "the HPA was unable to compute the replica count: %v", fmt.Errorf(errMsg))
return 0, "", time.Time{}, fmt.Errorf(errMsg)
}
return replicaCountProposal, metricNameProposal, timestampProposal, nil
}

func (a *HorizontalController) reconcileKey(key string) (deleted bool, err error) {
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
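The restructured computeReplicasForMetrics moves the per-metric work into computeReplicasForMetric and tolerates individual metric failures: invalid metrics are counted, the maximum of the valid proposals is kept, and the HPA only errors out when every metric is invalid or when an invalid metric is present and the valid proposals would scale down. A standalone sketch of that decision rule, with hypothetical names rather than the controller's real types:

```go
package main

import (
	"errors"
	"fmt"
)

// proposal is what evaluating a single metric yields: a replica count or an error.
type proposal struct {
	replicas int32
	err      error
}

// desiredReplicas mirrors the rule added to computeReplicasForMetrics: keep the
// maximum of the valid proposals, but refuse to act when all metrics are invalid
// or when a broken metric could be masking a needed scale-up (i.e. the valid
// proposals would scale the workload down).
func desiredReplicas(current int32, proposals []proposal) (int32, error) {
	var replicas int32
	invalid := 0
	var lastErr error

	for _, p := range proposals {
		if p.err != nil {
			invalid++
			lastErr = p.err
			continue
		}
		if replicas == 0 || p.replicas > replicas {
			replicas = p.replicas
		}
	}

	if invalid >= len(proposals) || (invalid > 0 && replicas < current) {
		return 0, fmt.Errorf("invalid metrics (%d invalid out of %d), last error was: %v",
			invalid, len(proposals), lastErr)
	}
	return replicas, nil
}

func main() {
	// One failing metric, but the valid one wants to scale up: allowed.
	fmt.Println(desiredReplicas(3, []proposal{{replicas: 4}, {err: errors.New("metrics API down")}}))
	// One failing metric and the valid one wants to scale down: blocked.
	fmt.Println(desiredReplicas(5, []proposal{{replicas: 2}, {err: errors.New("metrics API down")}}))
}
```

The tests added below (TestScaleUpOneMetricEmpty, TestNoScaleDownOneMetricInvalid, and TestNoScaleDownOneMetricEmpty) exercise exactly these two branches.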
96 changes: 95 additions & 1 deletion pkg/controller/podautoscaler/horizontal_test.go
@@ -26,7 +26,7 @@ import (

autoscalingv1 "k8s.io/api/autoscaling/v1"
autoscalingv2 "k8s.io/api/autoscaling/v2beta2"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -2668,4 +2668,98 @@ func TestNormalizeDesiredReplicas(t *testing.T) {
}
}

func TestScaleUpOneMetricEmpty(t *testing.T) {
tc := testCase{
minReplicas: 2,
maxReplicas: 6,
initialReplicas: 3,
expectedDesiredReplicas: 4,
CPUTarget: 30,
verifyCPUCurrent: true,
metricsTarget: []autoscalingv2.MetricSpec{
{
Type: autoscalingv2.ExternalMetricSourceType,
External: &autoscalingv2.ExternalMetricSource{
Metric: autoscalingv2.MetricIdentifier{
Name: "qps",
Selector: &metav1.LabelSelector{},
},
Target: autoscalingv2.MetricTarget{
Value: resource.NewMilliQuantity(100, resource.DecimalSI),
},
},
},
},
reportedLevels: []uint64{300, 400, 500},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
}
_, _, _, testEMClient, _ := tc.prepareTestClient(t)
testEMClient.PrependReactor("list", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
return true, &emapi.ExternalMetricValueList{}, fmt.Errorf("something went wrong")
})
tc.testEMClient = testEMClient
tc.runTest(t)
}

func TestNoScaleDownOneMetricInvalid(t *testing.T) {
tc := testCase{
minReplicas: 2,
maxReplicas: 6,
initialReplicas: 5,
expectedDesiredReplicas: 5,
CPUTarget: 50,
metricsTarget: []autoscalingv2.MetricSpec{
{
Type: "CheddarCheese",
},
},
reportedLevels: []uint64{100, 300, 500, 250, 250},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
useMetricsAPI: true,
expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededGetScale"},
{Type: autoscalingv1.ScalingActive, Status: v1.ConditionFalse, Reason: "InvalidMetricSourceType"},
},
}

tc.runTest(t)
}

func TestNoScaleDownOneMetricEmpty(t *testing.T) {
tc := testCase{
minReplicas: 2,
maxReplicas: 6,
initialReplicas: 5,
expectedDesiredReplicas: 5,
CPUTarget: 50,
metricsTarget: []autoscalingv2.MetricSpec{
{
Type: autoscalingv2.ExternalMetricSourceType,
External: &autoscalingv2.ExternalMetricSource{
Metric: autoscalingv2.MetricIdentifier{
Name: "qps",
Selector: &metav1.LabelSelector{},
},
Target: autoscalingv2.MetricTarget{
Value: resource.NewMilliQuantity(1000, resource.DecimalSI),
},
},
},
},
reportedLevels: []uint64{100, 300, 500, 250, 250},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
useMetricsAPI: true,
expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededGetScale"},
{Type: autoscalingv1.ScalingActive, Status: v1.ConditionFalse, Reason: "FailedGetExternalMetric"},
},
}
_, _, _, testEMClient, _ := tc.prepareTestClient(t)
testEMClient.PrependReactor("list", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
return true, &emapi.ExternalMetricValueList{}, fmt.Errorf("something went wrong")
})
tc.testEMClient = testEMClient
tc.runTest(t)
}

// TODO: add more tests
7 changes: 6 additions & 1 deletion pkg/kubelet/kubelet.go
@@ -2213,7 +2213,12 @@ func (kl *Kubelet) ListenAndServeReadOnly(address net.IP, port uint, enableCAdvi

// ListenAndServePodResources runs the kubelet podresources grpc service
func (kl *Kubelet) ListenAndServePodResources() {
server.ListenAndServePodResources(util.LocalEndpoint(kl.getPodResourcesDir(), podresources.Socket), kl.podManager, kl.containerManager)
socket, err := util.LocalEndpoint(kl.getPodResourcesDir(), podresources.Socket)
if err != nil {
klog.V(2).Infof("Failed to get local endpoint for PodResources endpoint: %v", err)
return
}
server.ListenAndServePodResources(socket, kl.podManager, kl.containerManager)
}

// Delete the eligible dead container instances in a pod. Depending on the configuration, the latest dead containers may be kept around.
4 changes: 2 additions & 2 deletions pkg/kubelet/util/util_unix.go
@@ -128,10 +128,10 @@ func parseEndpoint(endpoint string) (string, string, error) {
}

// LocalEndpoint returns the full path to a unix socket at the given endpoint
func LocalEndpoint(path, file string) string {
func LocalEndpoint(path, file string) (string, error) {
u := url.URL{
Scheme: unixProtocol,
Path: path,
}
return filepath.Join(u.String(), file+".sock")
return filepath.Join(u.String(), file+".sock"), nil
}
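For reference, the Unix implementation builds a "unix"-scheme endpoint from the pod-resources directory and a socket name; because filepath.Join runs path.Clean on the result, the URL's triple slash collapses to a single one. A small sketch reproducing that construction with an illustrative path (the kubelet derives the real directory at runtime):

```go
package main

import (
	"fmt"
	"net/url"
	"path/filepath"
)

const unixProtocol = "unix"

// localEndpoint mirrors the Unix implementation shown above.
func localEndpoint(path, file string) (string, error) {
	u := url.URL{
		Scheme: unixProtocol,
		Path:   path,
	}
	return filepath.Join(u.String(), file+".sock"), nil
}

func main() {
	// Illustrative path only. filepath.Join cleans the joined URL, so the
	// result has the form "unix:/var/lib/kubelet/pod-resources/kubelet.sock".
	ep, err := localEndpoint("/var/lib/kubelet/pod-resources", "kubelet")
	fmt.Println(ep, err)
}
```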
4 changes: 2 additions & 2 deletions pkg/kubelet/util/util_unsupported.go
@@ -44,8 +44,8 @@ func UnlockPath(fileHandles []uintptr) {
}

// LocalEndpoint empty implementation
func LocalEndpoint(path, file string) string {
return ""
func LocalEndpoint(path, file string) (string, error) {
return "", fmt.Errorf("LocalEndpoints are unsupported in this build")
}

// GetBootTime empty implementation
10 changes: 3 additions & 7 deletions pkg/kubelet/util/util_windows.go
@@ -107,13 +107,9 @@ func parseEndpoint(endpoint string) (string, string, error) {
}
}

// LocalEndpoint returns the full path to a windows named pipe
func LocalEndpoint(path, file string) string {
u := url.URL{
Scheme: npipeProtocol,
Path: path,
}
return u.String() + "//./pipe/" + file
// LocalEndpoint empty implementation
func LocalEndpoint(path, file string) (string, error) {
return "", fmt.Errorf("LocalEndpoints are unsupported in this build")
}

var tickCount = syscall.NewLazyDLL("kernel32.dll").NewProc("GetTickCount64")