Use controller framework in HPA controller.
fgrzadkowski committed Mar 3, 2016
1 parent df5c841 commit b5c9af9
Showing 4 changed files with 86 additions and 47 deletions.
4 changes: 2 additions & 2 deletions cmd/kube-controller-manager/app/controllermanager.go
@@ -284,8 +284,8 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
metrics.DefaultHeapsterService,
metrics.DefaultHeapsterPort,
)
podautoscaler.NewHorizontalController(hpaClient.Core(), hpaClient.Extensions(), hpaClient, metricsClient).
Run(s.HorizontalPodAutoscalerSyncPeriod.Duration)
go podautoscaler.NewHorizontalController(hpaClient.Core(), hpaClient.Extensions(), hpaClient, metricsClient, s.HorizontalPodAutoscalerSyncPeriod.Duration).
Run(wait.NeverStop)
}

if containsResource(resources, "daemonsets") {
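
Both call sites change the same way: the sync period, previously consumed by a blocking Run(syncPeriod) loop, is now handed to NewHorizontalController as the informer resync period, while Run takes a stop channel and is started on its own goroutine (wait.NeverStop is a channel that is never closed, so the controller runs for the life of the process). Below is a minimal sketch of that start/stop contract, with toy types standing in for the real controller and the wait package:

```go
package main

import (
	"fmt"
	"time"
)

// neverStop mirrors wait.NeverStop: a channel that is never closed, so a
// controller blocking on it runs until the process exits.
var neverStop <-chan struct{} = make(chan struct{})

// controller is a toy stand-in for the HPA controller.
type controller struct {
	resyncPeriod time.Duration // was the argument to Run, now a constructor input
}

// Run follows the new contract: start work, then block until stopCh closes.
func (c *controller) Run(stopCh <-chan struct{}) {
	fmt.Println("controller started; resync:", c.resyncPeriod)
	<-stopCh
	fmt.Println("controller stopped")
}

func main() {
	c := &controller{resyncPeriod: 30 * time.Second}
	go c.Run(neverStop) // non-blocking start, as in the updated call sites
	time.Sleep(100 * time.Millisecond)
}
```

Passing a stop channel instead of a period also separates "how often to resync" (a constructor concern) from "when to shut down" (a caller concern).
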
4 changes: 2 additions & 2 deletions contrib/mesos/pkg/controllermanager/controllermanager.go
@@ -232,8 +232,8 @@ func (s *CMServer) Run(_ []string) error {
metrics.DefaultHeapsterService,
metrics.DefaultHeapsterPort,
)
podautoscaler.NewHorizontalController(hpaClient.Core(), hpaClient.Extensions(), hpaClient, metricsClient).
Run(s.HorizontalPodAutoscalerSyncPeriod.Duration)
go podautoscaler.NewHorizontalController(hpaClient.Core(), hpaClient.Extensions(), hpaClient, metricsClient, s.HorizontalPodAutoscalerSyncPeriod.Duration).
Run(wait.NeverStop)
}

if containsResource(resources, "daemonsets") {
105 changes: 66 additions & 39 deletions pkg/controller/podautoscaler/horizontal.go
@@ -27,11 +27,15 @@ import (
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/record"
unversionedcore "k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned"
unversionedextensions "k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/runtime"
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/watch"
)

const (
@@ -51,33 +55,70 @@ type HorizontalController struct {

metricsClient metrics.MetricsClient
eventRecorder record.EventRecorder

// A store of HPA objects, populated by the controller.
store cache.Store
// Watches changes to all HPA objects.
controller *framework.Controller
}

var downscaleForbiddenWindow = 5 * time.Minute
var upscaleForbiddenWindow = 3 * time.Minute

func NewHorizontalController(evtNamespacer unversionedcore.EventsGetter, scaleNamespacer unversionedextensions.ScalesGetter, hpaNamespacer unversionedextensions.HorizontalPodAutoscalersGetter, metricsClient metrics.MetricsClient) *HorizontalController {
func NewHorizontalController(evtNamespacer unversionedcore.EventsGetter, scaleNamespacer unversionedextensions.ScalesGetter, hpaNamespacer unversionedextensions.HorizontalPodAutoscalersGetter, metricsClient metrics.MetricsClient, resyncPeriod time.Duration) *HorizontalController {
broadcaster := record.NewBroadcaster()
broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{evtNamespacer.Events("")})
recorder := broadcaster.NewRecorder(api.EventSource{Component: "horizontal-pod-autoscaler"})

return &HorizontalController{
controller := &HorizontalController{
metricsClient: metricsClient,
eventRecorder: recorder,
scaleNamespacer: scaleNamespacer,
hpaNamespacer: hpaNamespacer,
}

controller.store, controller.controller = framework.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return controller.hpaNamespacer.HorizontalPodAutoscalers(api.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return controller.hpaNamespacer.HorizontalPodAutoscalers(api.NamespaceAll).Watch(options)
},
},
&extensions.HorizontalPodAutoscaler{},
resyncPeriod,
framework.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
hpa := obj.(*extensions.HorizontalPodAutoscaler)
err := controller.reconcileAutoscaler(hpa)
if err != nil {
glog.Warningf("Failed to reconcile %s: %v", hpa.Name, err)
}
},
UpdateFunc: func(old, cur interface{}) {
hpa := cur.(*extensions.HorizontalPodAutoscaler)
err := controller.reconcileAutoscaler(hpa)
if err != nil {
glog.Warningf("Failed to reconcile %s: %v", hpa.Name, err)
}
},
// We are not interested in deletions.
},
)

return controller
}

func (a *HorizontalController) Run(syncPeriod time.Duration) {
go wait.Until(func() {
if err := a.reconcileAutoscalers(); err != nil {
glog.Errorf("Couldn't reconcile horizontal pod autoscalers: %v", err)
}
}, syncPeriod, wait.NeverStop)
func (a *HorizontalController) Run(stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
glog.Infof("Starting HPA Controller")
go a.controller.Run(stopCh)
<-stopCh
glog.Infof("Shutting down HPA Controller")
}

func (a *HorizontalController) computeReplicasForCPUUtilization(hpa extensions.HorizontalPodAutoscaler, scale *extensions.Scale) (int, *int, time.Time, error) {
func (a *HorizontalController) computeReplicasForCPUUtilization(hpa *extensions.HorizontalPodAutoscaler, scale *extensions.Scale) (int, *int, time.Time, error) {
targetUtilization := defaultTargetCPUUtilizationPercentage
if hpa.Spec.CPUUtilization != nil {
targetUtilization = hpa.Spec.CPUUtilization.TargetPercentage
@@ -87,7 +128,7 @@ func (a *HorizontalController) computeReplicasForCPUUtilization(hpa extensions.H

// TODO: what to do on partial errors (like metrics obtained for 75% of pods).
if err != nil {
a.eventRecorder.Event(&hpa, api.EventTypeWarning, "FailedGetMetrics", err.Error())
a.eventRecorder.Event(hpa, api.EventTypeWarning, "FailedGetMetrics", err.Error())
return 0, nil, time.Time{}, fmt.Errorf("failed to get cpu utilization: %v", err)
}

@@ -103,7 +144,7 @@ func (a *HorizontalController) computeReplicasForCPUUtilization(hpa extensions.H
// extensions.CustomMetricsTargetList.
// Returns number of replicas, status string (also json-serialized extensions.CustomMetricsCurrentStatusList),
// last timestamp of the metrics involved in computations or error, if occurred.
func (a *HorizontalController) computeReplicasForCustomMetrics(hpa extensions.HorizontalPodAutoscaler, scale *extensions.Scale,
func (a *HorizontalController) computeReplicasForCustomMetrics(hpa *extensions.HorizontalPodAutoscaler, scale *extensions.Scale,
cmAnnotation string) (int, string, time.Time, error) {

currentReplicas := scale.Status.Replicas
Expand All @@ -130,7 +171,7 @@ func (a *HorizontalController) computeReplicasForCustomMetrics(hpa extensions.Ho
value, currentTimestamp, err := a.metricsClient.GetCustomMetric(customMetricTarget.Name, hpa.Namespace, scale.Status.Selector)
// TODO: what to do on partial errors (like metrics obtained for 75% of pods).
if err != nil {
a.eventRecorder.Event(&hpa, api.EventTypeWarning, "FailedGetCustomMetrics", err.Error())
a.eventRecorder.Event(hpa, api.EventTypeWarning, "FailedGetCustomMetrics", err.Error())
return 0, "", time.Time{}, fmt.Errorf("failed to get custom metric value: %v", err)
}
floatTarget := float64(customMetricTarget.TargetValue.MilliValue()) / 1000.0
@@ -163,12 +204,12 @@ func (a *HorizontalController) computeReplicasForCustomMetrics(hpa extensions.Ho
return replicas, string(byteStatusList), timestamp, nil
}

func (a *HorizontalController) reconcileAutoscaler(hpa extensions.HorizontalPodAutoscaler) error {
func (a *HorizontalController) reconcileAutoscaler(hpa *extensions.HorizontalPodAutoscaler) error {
reference := fmt.Sprintf("%s/%s/%s", hpa.Spec.ScaleRef.Kind, hpa.Namespace, hpa.Spec.ScaleRef.Name)

scale, err := a.scaleNamespacer.Scales(hpa.Namespace).Get(hpa.Spec.ScaleRef.Kind, hpa.Spec.ScaleRef.Name)
if err != nil {
a.eventRecorder.Event(&hpa, api.EventTypeWarning, "FailedGetScale", err.Error())
a.eventRecorder.Event(hpa, api.EventTypeWarning, "FailedGetScale", err.Error())
return fmt.Errorf("failed to query scale subresource for %s: %v", reference, err)
}
currentReplicas := scale.Status.Replicas
@@ -198,7 +239,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpa extensions.HorizontalPodA
cpuDesiredReplicas, cpuCurrentUtilization, cpuTimestamp, err = a.computeReplicasForCPUUtilization(hpa, scale)
if err != nil {
a.updateCurrentReplicasInStatus(hpa, currentReplicas)
a.eventRecorder.Event(&hpa, api.EventTypeWarning, "FailedComputeReplicas", err.Error())
a.eventRecorder.Event(hpa, api.EventTypeWarning, "FailedComputeReplicas", err.Error())
return fmt.Errorf("failed to compute desired number of replicas based on CPU utilization for %s: %v", reference, err)
}
}
@@ -207,7 +248,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpa extensions.HorizontalPodA
cmDesiredReplicas, cmStatus, cmTimestamp, err = a.computeReplicasForCustomMetrics(hpa, scale, cmAnnotation)
if err != nil {
a.updateCurrentReplicasInStatus(hpa, currentReplicas)
a.eventRecorder.Event(&hpa, api.EventTypeWarning, "FailedComputeCMReplicas", err.Error())
a.eventRecorder.Event(hpa, api.EventTypeWarning, "FailedComputeCMReplicas", err.Error())
return fmt.Errorf("failed to compute desired number of replicas based on Custom Metrics for %s: %v", reference, err)
}
}
@@ -240,10 +281,10 @@ func (a *HorizontalController) reconcileAutoscaler(hpa extensions.HorizontalPodA
scale.Spec.Replicas = desiredReplicas
_, err = a.scaleNamespacer.Scales(hpa.Namespace).Update(hpa.Spec.ScaleRef.Kind, scale)
if err != nil {
a.eventRecorder.Eventf(&hpa, api.EventTypeWarning, "FailedRescale", "New size: %d; error: %v", desiredReplicas, err.Error())
a.eventRecorder.Eventf(hpa, api.EventTypeWarning, "FailedRescale", "New size: %d; error: %v", desiredReplicas, err.Error())
return fmt.Errorf("failed to rescale %s: %v", reference, err)
}
a.eventRecorder.Eventf(&hpa, api.EventTypeNormal, "SuccessfulRescale", "New size: %d", desiredReplicas)
a.eventRecorder.Eventf(hpa, api.EventTypeNormal, "SuccessfulRescale", "New size: %d", desiredReplicas)
glog.Infof("Successful rescale of %s, old size: %d, new size: %d",
hpa.Name, currentReplicas, desiredReplicas)
} else {
@@ -253,7 +294,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpa extensions.HorizontalPodA
return a.updateStatus(hpa, currentReplicas, desiredReplicas, cpuCurrentUtilization, cmStatus, rescale)
}

func shouldScale(hpa extensions.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int, timestamp time.Time) bool {
func shouldScale(hpa *extensions.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int, timestamp time.Time) bool {
if desiredReplicas != currentReplicas {
// Going down only if the usageRatio dropped significantly below the target
// and there was no rescaling in the last downscaleForbiddenWindow.
@@ -274,14 +315,14 @@ func shouldScale(hpa extensions.HorizontalPodAutoscaler, currentReplicas, desire
return false
}

func (a *HorizontalController) updateCurrentReplicasInStatus(hpa extensions.HorizontalPodAutoscaler, currentReplicas int) {
func (a *HorizontalController) updateCurrentReplicasInStatus(hpa *extensions.HorizontalPodAutoscaler, currentReplicas int) {
err := a.updateStatus(hpa, currentReplicas, hpa.Status.DesiredReplicas, hpa.Status.CurrentCPUUtilizationPercentage, hpa.Annotations[HpaCustomMetricsStatusAnnotationName], false)
if err != nil {
glog.Errorf("%v", err)
}
}

func (a *HorizontalController) updateStatus(hpa extensions.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int, cpuCurrentUtilization *int, cmStatus string, rescale bool) error {
func (a *HorizontalController) updateStatus(hpa *extensions.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int, cpuCurrentUtilization *int, cmStatus string, rescale bool) error {
hpa.Status = extensions.HorizontalPodAutoscalerStatus{
CurrentReplicas: currentReplicas,
DesiredReplicas: desiredReplicas,
@@ -297,25 +338,11 @@ func (a *HorizontalController) updateStatus(hpa extensions.HorizontalPodAutoscal
hpa.Status.LastScaleTime = &now
}

_, err := a.hpaNamespacer.HorizontalPodAutoscalers(hpa.Namespace).UpdateStatus(&hpa)
_, err := a.hpaNamespacer.HorizontalPodAutoscalers(hpa.Namespace).UpdateStatus(hpa)
if err != nil {
a.eventRecorder.Event(&hpa, api.EventTypeWarning, "FailedUpdateStatus", err.Error())
a.eventRecorder.Event(hpa, api.EventTypeWarning, "FailedUpdateStatus", err.Error())
return fmt.Errorf("failed to update status for %s: %v", hpa.Name, err)
}
return nil
}

func (a *HorizontalController) reconcileAutoscalers() error {
ns := api.NamespaceAll
list, err := a.hpaNamespacer.HorizontalPodAutoscalers(ns).List(api.ListOptions{})
if err != nil {
return fmt.Errorf("error listing nodes: %v", err)
}
for _, hpa := range list.Items {
err := a.reconcileAutoscaler(hpa)
if err != nil {
glog.Warningf("Failed to reconcile %s: %v", hpa.Name, err)
}
}
glog.V(2).Infof("Successfully updated status for %s", hpa.Name)
return nil
}
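
The heart of the change: the deleted polling loop (reconcileAutoscalers, which listed every HPA each sync period) is replaced by framework.NewInformer, which takes a ListWatch over all HPA objects, the expected object type, a resync period, and event handlers, and returns a store plus a controller that drives reconcileAutoscaler on every add and update (and again on each resync). The sketch below models that wiring with toy stand-ins; it is not the real cache/framework API, only the shape of the pattern:

```go
package main

import (
	"fmt"
	"time"
)

// hpa is a toy stand-in for extensions.HorizontalPodAutoscaler.
type hpa struct{ name string }

// handlers mirrors framework.ResourceEventHandlerFuncs: callbacks for adds
// and updates; deletes are intentionally absent, as in the real controller.
type handlers struct {
	add    func(obj interface{})
	update func(old, cur interface{})
}

// informer is a toy model of framework.NewInformer: a real informer lists
// once, then feeds watch events to the handlers and re-lists every resync
// period; this sketch only re-lists on a ticker and calls add for each item.
func informer(list func() []hpa, resync time.Duration, h handlers, stopCh <-chan struct{}) {
	ticker := time.NewTicker(resync)
	defer ticker.Stop()
	for {
		select {
		case <-stopCh:
			return
		case <-ticker.C:
			for _, item := range list() {
				item := item // copy the loop variable before taking its address
				h.add(&item)
			}
		}
	}
}

func main() {
	list := func() []hpa { return []hpa{{name: "frontend"}, {name: "worker"}} }
	reconcile := func(h *hpa) error { // stands in for reconcileAutoscaler
		fmt.Println("reconciling", h.name)
		return nil
	}
	stop := make(chan struct{})
	go informer(list, 50*time.Millisecond, handlers{
		add: func(obj interface{}) {
			if err := reconcile(obj.(*hpa)); err != nil {
				fmt.Println("failed to reconcile:", err)
			}
		},
		update: func(old, cur interface{}) {
			_ = reconcile(cur.(*hpa))
		},
	}, stop)
	time.Sleep(120 * time.Millisecond)
	close(stop)
}
```

Deletions are deliberately left unhandled, matching the "We are not interested in deletions." comment in the real handler set: once an HPA object is gone there is nothing left to reconcile.
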
20 changes: 16 additions & 4 deletions pkg/controller/podautoscaler/horizontal_test.go
@@ -33,9 +33,12 @@ import (
"k8s.io/kubernetes/pkg/client/unversioned/testclient"
"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/watch"

heapster "k8s.io/heapster/api/v1/types"

"github.com/golang/glog"
"github.com/stretchr/testify/assert"
)

@@ -215,11 +218,11 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
assert.Equal(t, namespace, obj.Namespace)
assert.Equal(t, hpaName, obj.Name)
assert.Equal(t, tc.desiredReplicas, obj.Status.DesiredReplicas)
tc.statusUpdated = true
if tc.verifyCPUCurrent {
assert.NotNil(t, obj.Status.CurrentCPUUtilizationPercentage)
assert.Equal(t, tc.CPUCurrent, *obj.Status.CurrentCPUUtilizationPercentage)
}
tc.statusUpdated = true
return true, obj, nil
})

@@ -233,6 +236,9 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
return true, obj, nil
})

fakeWatch := watch.NewFake()
fakeClient.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))

return fakeClient
}

@@ -247,13 +253,19 @@ func (tc *testCase) verifyResults(t *testing.T) {
func (tc *testCase) runTest(t *testing.T) {
testClient := tc.prepareTestClient(t)
metricsClient := metrics.NewHeapsterMetricsClient(testClient, metrics.DefaultHeapsterNamespace, metrics.DefaultHeapsterScheme, metrics.DefaultHeapsterService, metrics.DefaultHeapsterPort)
hpaController := NewHorizontalController(testClient.Core(), testClient.Extensions(), testClient.Extensions(), metricsClient)
err := hpaController.reconcileAutoscalers()
assert.Equal(t, nil, err)
hpaController := NewHorizontalController(testClient.Core(), testClient.Extensions(), testClient.Extensions(), metricsClient, 0)
stop := make(chan struct{})
defer close(stop)
go hpaController.Run(stop)
if tc.verifyEvents {
// We need to wait for events to be broadcasted (sleep for longer than record.sleepDuration).
time.Sleep(12 * time.Second)
}
// Each iteration for an HPA object ends with updating status.
wait.Poll(1*time.Second, 30*time.Second, func() (done bool, err error) {
glog.Infof("Status value: %v", tc.statusUpdated)
return tc.statusUpdated, nil
})
tc.verifyResults(t)
}
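
With reconciliation now running asynchronously on the controller's goroutine, the test can no longer call a synchronous reconcile pass and assert immediately; it polls until the UpdateStatus reactor has flipped tc.statusUpdated. wait.Poll(interval, timeout, condition) retries condition on each tick until it returns true, returns an error, or the timeout elapses. Here is a hand-rolled sketch of the same idiom (poll is an illustrative helper modeled on wait.Poll's signature, not the real wait package):

```go
package main

import (
	"errors"
	"fmt"
	"sync/atomic"
	"time"
)

// poll mimics wait.Poll: run condition every interval until it reports done,
// returns an error, or the timeout elapses.
func poll(interval, timeout time.Duration, condition func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for {
		done, err := condition()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for the condition")
		}
		time.Sleep(interval)
	}
}

func main() {
	var statusUpdated atomic.Bool // flipped asynchronously, like the UpdateStatus reactor
	go func() {
		time.Sleep(30 * time.Millisecond)
		statusUpdated.Store(true)
	}()
	err := poll(10*time.Millisecond, time.Second, func() (bool, error) {
		return statusUpdated.Load(), nil
	})
	fmt.Println("poll result:", err) // nil on success
}
```

Note that the diff discards wait.Poll's return value; verifyResults still fails if the status never updates, but checking the returned error would make a timeout easier to diagnose.
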

