Only expose top N images in NodeStatus #25328

Merged (2 commits) · May 21, 2016
18 changes: 18 additions & 0 deletions pkg/kubelet/kubelet.go
@@ -151,6 +151,9 @@ const (

// Maximum period to wait for pod volume setup operations
maxWaitForVolumeOps = 20 * time.Minute

// maxImagesInNodeStatus is the maximum number of images we store in the node status.
maxImagesInNodeStatus = 50

Contributor: @resouer @vishh Are we going to expose this as a kubelet flag?

Contributor: @yifan-gu Why do you think this should be configurable?

Contributor: cc @ncdc

Contributor (author): Not in my plan.

)

// SyncHandler is an interface implemented by Kubelet, for testability
@@ -3096,6 +3099,12 @@ func (kl *Kubelet) setNodeStatusImages(node *api.Node) {
if err != nil {
glog.Errorf("Error getting image list: %v", err)
} else {
// Sort the images from largest to smallest, and only set the top N images in the node status.
sort.Sort(byImageSize(containerImages))
if maxImagesInNodeStatus < len(containerImages) {
containerImages = containerImages[0:maxImagesInNodeStatus]
}

for _, image := range containerImages {
imagesOnNode = append(imagesOnNode, api.ContainerImage{
Names: append(image.RepoTags, image.RepoDigests...),
@@ -3112,6 +3121,15 @@ func (kl *Kubelet) setNodeStatusGoRuntime(node *api.Node) {
node.Status.NodeInfo.Architecture = goRuntime.GOARCH
}

type byImageSize []kubecontainer.Image

// Sort from max to min
func (a byImageSize) Less(i, j int) bool {
return a[i].Size > a[j].Size
}
func (a byImageSize) Len() int { return len(a) }
func (a byImageSize) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// Set status for the node.
func (kl *Kubelet) setNodeStatusInfo(node *api.Node) {
kl.setNodeStatusMachineInfo(node)
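As an aside for readers skimming the diff: below is a minimal standalone sketch of the sort-and-truncate behavior added above. It is an illustration, not kubelet code; the image type stands in for kubecontainer.Image, and the cap is shrunk from 50 to 2 so the truncation is visible in the output.

package main

import (
	"fmt"
	"sort"
)

// image is a stand-in for kubecontainer.Image; only Size matters for ordering.
type image struct {
	ID   string
	Size int64
}

// byImageSize mirrors the comparator introduced in kubelet.go: largest first.
type byImageSize []image

func (a byImageSize) Less(i, j int) bool { return a[i].Size > a[j].Size }
func (a byImageSize) Len() int           { return len(a) }
func (a byImageSize) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

func main() {
	const maxImagesInNodeStatus = 2 // the real constant is 50

	images := []image{{"a", 123}, {"b", 456}, {"c", 789}}
	sort.Sort(byImageSize(images))
	if maxImagesInNodeStatus < len(images) {
		images = images[0:maxImagesInNodeStatus]
	}
	fmt.Println(images) // prints [{c 789} {b 456}]
}

Sorting descending and slicing the head keeps the selection a plain slice operation, which is why the kubelet change needs only the comparator type plus a few lines in setNodeStatusImages.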
111 changes: 83 additions & 28 deletions pkg/kubelet/kubelet_test.go
@@ -26,6 +26,7 @@ import (
"os"
"reflect"
"sort"
"strconv"
"strings"
"testing"
"time"
@@ -67,6 +68,7 @@ import (
"k8s.io/kubernetes/pkg/util/diff"
"k8s.io/kubernetes/pkg/util/flowcontrol"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/rand"
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/wait"
@@ -80,10 +82,19 @@ func init() {
utilruntime.ReallyCrash = true
}

-const testKubeletHostname = "127.0.0.1"
+const (
+testKubeletHostname = "127.0.0.1"

-const testReservationCPU = "200m"
-const testReservationMemory = "100M"
+testReservationCPU = "200m"
+testReservationMemory = "100M"

+maxImageTagsForTest = 3

+// TODO(harry) any global place for these two?
+// Reasonable size range of all container images. 90%ile of images on dockerhub drops into this range.
+minImgSize int64 = 23 * 1024 * 1024
+maxImgSize int64 = 1000 * 1024 * 1024
+)

type fakeHTTP struct {
url string
@@ -105,11 +116,9 @@ type TestKubelet struct {
mounter mount.Interface
}

// newTestKubelet returns test kubelet with two images.
func newTestKubelet(t *testing.T) *TestKubelet {
-fakeRuntime := &containertest.FakeRuntime{}
-fakeRuntime.RuntimeType = "test"
-fakeRuntime.VersionInfo = "1.5.0"
-fakeRuntime.ImageList = []kubecontainer.Image{
+imageList := []kubecontainer.Image{
{
ID: "abc",
RepoTags: []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"},
@@ -121,6 +130,53 @@ func newTestKubelet(t *testing.T) *TestKubelet {
Size: 456,
},
}
return newTestKubeletWithImageList(t, imageList)
}

// generateTestingImageList generates a random image list and the corresponding expectedImageList.
func generateTestingImageList(count int) ([]kubecontainer.Image, []api.ContainerImage) {
// imageList is randomly generated image list
var imageList []kubecontainer.Image
for ; count > 0; count-- {
imageItem := kubecontainer.Image{
ID: string(util.NewUUID()),
RepoTags: generateImageTags(),
Size: rand.Int63nRange(minImgSize, maxImgSize+1),
}
imageList = append(imageList, imageItem)
}

// expectedImageList is generated by imageList according to size and maxImagesInNodeStatus
// 1. sort the imageList by size
sort.Sort(byImageSize(imageList))
// 2. convert sorted imageList to api.ContainerImage list
var expectedImageList []api.ContainerImage
for _, kubeImage := range imageList {
apiImage := api.ContainerImage{
Names: kubeImage.RepoTags,
SizeBytes: kubeImage.Size,
}

expectedImageList = append(expectedImageList, apiImage)
}
// 3. only returns the top maxImagesInNodeStatus images in expectedImageList
return imageList, expectedImageList[0:maxImagesInNodeStatus]
}

func generateImageTags() []string {
var tagList []string
count := rand.IntnRange(1, maxImageTagsForTest+1)
for ; count > 0; count-- {
tagList = append(tagList, "gcr.io/google_containers:v"+strconv.Itoa(count))
}
return tagList
}

func newTestKubeletWithImageList(t *testing.T, imageList []kubecontainer.Image) *TestKubelet {
fakeRuntime := &containertest.FakeRuntime{}
fakeRuntime.RuntimeType = "test"
fakeRuntime.VersionInfo = "1.5.0"
fakeRuntime.ImageList = imageList
fakeRecorder := &record.FakeRecorder{}
fakeKubeClient := &fake.Clientset{}
kubelet := &Kubelet{}
@@ -2349,7 +2405,9 @@ func updateDiskSpacePolicy(kubelet *Kubelet, mockCadvisor *cadvisortest.Mock, ro
}

func TestUpdateNewNodeStatus(t *testing.T) {
-testKubelet := newTestKubelet(t)
+// generate one more than maxImagesInNodeStatus in inputImageList
+inputImageList, expectedImageList := generateTestingImageList(maxImagesInNodeStatus + 1)
+testKubelet := newTestKubeletWithImageList(t, inputImageList)
kubelet := testKubelet.kubelet
kubeClient := testKubelet.fakeKubeClient
kubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{
@@ -2434,16 +2492,7 @@ func TestUpdateNewNodeStatus(t *testing.T) {
{Type: api.NodeLegacyHostIP, Address: "127.0.0.1"},
{Type: api.NodeInternalIP, Address: "127.0.0.1"},
},
-Images: []api.ContainerImage{
-{
-Names: []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"},
-SizeBytes: 123,
-},
-{
-Names: []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"},
-SizeBytes: 456,
-},
-},
+Images: expectedImageList,
},
}

@@ -2478,9 +2527,14 @@ func TestUpdateNewNodeStatus(t *testing.T) {
t.Errorf("unexpected node condition order. NodeReady should be last.")
}

-if !api.Semantic.DeepEqual(expectedNode, updatedNode) {
-t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNode, updatedNode))
+if maxImagesInNodeStatus != len(updatedNode.Status.Images) {
+t.Errorf("unexpected image list length in node status, expected: %v, got: %v", maxImagesInNodeStatus, len(updatedNode.Status.Images))
+} else {
+if !api.Semantic.DeepEqual(expectedNode, updatedNode) {
+t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNode, updatedNode))
+}
}

}

func TestUpdateNewNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) {
@@ -2685,15 +2739,16 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
{Type: api.NodeLegacyHostIP, Address: "127.0.0.1"},
{Type: api.NodeInternalIP, Address: "127.0.0.1"},
},
// images will be sorted from max to min in node status.

Contributor: Would be great if we can test the maxImagesInNodeStatus as well.

Contributor: +1.

Contributor (author): Tests added for this.

Images: []api.ContainerImage{
-{
-Names: []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"},
-SizeBytes: 123,
-},
{
Names: []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"},
SizeBytes: 456,
},
+{
+Names: []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"},
+SizeBytes: 123,
+},
},
},
}
@@ -2969,14 +3024,14 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
{Type: api.NodeInternalIP, Address: "127.0.0.1"},
},
Images: []api.ContainerImage{
-{
-Names: []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"},
-SizeBytes: 123,
-},
{
Names: []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"},
SizeBytes: 456,
},
+{
+Names: []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"},
+SizeBytes: 123,
+},
},
},
}
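Not part of this PR, but a sketch of an extra assertion that could sit alongside the tests above. It assumes it lives in the same kubelet_test.go file, so generateTestingImageList, maxImagesInNodeStatus, and api.ContainerImage are in scope; it checks the contract the randomized test relies on, namely that the expected list is capped at maxImagesInNodeStatus and ordered by size, largest first.

// Hypothetical helper test, not in this PR: verifies generateTestingImageList's contract.
func TestGenerateTestingImageListContract(t *testing.T) {
	_, expected := generateTestingImageList(maxImagesInNodeStatus + 1)
	if len(expected) != maxImagesInNodeStatus {
		t.Fatalf("expected %d images in the expected list, got %d", maxImagesInNodeStatus, len(expected))
	}
	for i := 1; i < len(expected); i++ {
		if expected[i-1].SizeBytes < expected[i].SizeBytes {
			t.Errorf("expected list is not sorted largest-first at index %d", i)
		}
	}
}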
18 changes: 17 additions & 1 deletion pkg/util/rand/rand.go
@@ -32,14 +32,30 @@ var rng = struct {
rand: rand.New(rand.NewSource(time.Now().UTC().UnixNano())),
}

-// Intn generates an integer in range 0->max.
+// Intn generates an integer in range [0,max).
+// By design this should panic if input is invalid, <= 0.
func Intn(max int) int {
rng.Lock()
defer rng.Unlock()
return rng.rand.Intn(max)
}

// IntnRange generates an integer in range [min,max).
// By design this should panic if input is invalid, <= 0.
func IntnRange(min, max int) int {
rng.Lock()
defer rng.Unlock()
return rng.rand.Intn(max-min) + min
}

// Int63nRange generates an int64 in range [min,max).
// By design this should panic if input is invalid, <= 0.
func Int63nRange(min, max int64) int64 {
rng.Lock()
defer rng.Unlock()
return rng.rand.Int63n(max-min) + min
}

// Seed seeds the rng with the provided seed.
func Seed(seed int64) {
rng.Lock()
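For context, a small usage sketch of the two new helpers, assuming the import path already used in this PR, k8s.io/kubernetes/pkg/util/rand; the bounds mirror how kubelet_test.go picks image sizes and tag counts.

// Illustrative example only; not part of the PR.
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/rand"
)

func main() {
	// A size in [23 MiB, 1000 MiB], the same bounds as minImgSize/maxImgSize in kubelet_test.go.
	size := rand.Int63nRange(23*1024*1024, 1000*1024*1024+1)

	// Between 1 and 3 image tags, matching maxImageTagsForTest.
	tagCount := rand.IntnRange(1, 3+1)

	fmt.Printf("size=%d bytes, tags=%d\n", size, tagCount)
}

As documented above, both helpers panic when the range is empty (max - min <= 0).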
28 changes: 28 additions & 0 deletions pkg/util/rand/rand_test.go
@@ -24,6 +24,10 @@ import (
"testing"
)

const (
maxRangeTestCount = 500
)

func TestString(t *testing.T) {
valid := "0123456789abcdefghijklmnopqrstuvwxyz"
for _, l := range []int{0, 1, 2, 10, 123} {
@@ -84,3 +88,27 @@ func TestShuffle(t *testing.T) {
t.Errorf("Shuffle(%v) => %v, want %v", have, got, want)
}
}

func TestIntnRange(t *testing.T) {
// 0 is invalid.
for min, max := range map[int]int{1: 2, 10: 123, 100: 500} {
for i := 0; i < maxRangeTestCount; i++ {
inrange := IntnRange(min, max)
if inrange < min || inrange >= max {
t.Errorf("%v out of range (%v,%v)", inrange, min, max)
}
}
}
}

func TestInt63nRange(t *testing.T) {
// 0 is invalid.
for min, max := range map[int64]int64{1: 2, 10: 123, 100: 500} {
for i := 0; i < maxRangeTestCount; i++ {
inrange := Int63nRange(min, max)
if inrange < min || inrange >= max {
t.Errorf("%v out of range (%v,%v)", inrange, min, max)
}
}
}
}