Merge pull request kubernetes#26451 from Random-Liu/cache_image_history
Automatic merge from submit-queue

Kubelet: Cache image history to eliminate the performance regression

Fix kubernetes#25057.

The image history operation accounts for almost 50% of the kubelet's CPU usage in the performance test. We should cache image history instead of fetching it from the runtime every time.

This PR caches image history in imageStatsProvider and adds a unit test.
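
The core of the change is a simple lookup-before-fetch cache. Below is a minimal, self-contained sketch of that idea; the `layer`, `historyClient`, `historyCache`, and `fakeRuntime` names are illustrative stand-ins, not kubelet code. The real implementation is `imageStatsProvider` in `pkg/kubelet/dockertools/images.go` further down.

```go
package main

import "fmt"

// layer and historyClient are simplified stand-ins for dockertypes.ImageHistory
// and the kubelet's DockerInterface (illustrative only).
type layer struct {
	ID   string
	Size int64
}

type historyClient interface {
	ImageHistory(imageID string) ([]layer, error)
}

// historyCache remembers the layers of every image that has already been
// inspected, so repeated stats passes only query the runtime for images seen
// for the first time.
type historyCache struct {
	c      historyClient
	layers map[string][]layer // image ID -> its layers
	calls  int                // how many times the runtime was actually queried
}

func (hc *historyCache) layersFor(imageID string) ([]layer, error) {
	if cached, ok := hc.layers[imageID]; ok {
		return cached, nil // cache hit: no runtime call
	}
	hc.calls++
	history, err := hc.c.ImageHistory(imageID) // cache miss: one runtime call
	if err != nil {
		return nil, fmt.Errorf("failed to get history of image %q: %v", imageID, err)
	}
	hc.layers[imageID] = history
	return history, nil
}

// fakeRuntime always reports the same two layers, standing in for docker.
type fakeRuntime struct{}

func (fakeRuntime) ImageHistory(string) ([]layer, error) {
	return []layer{{ID: "base", Size: 100}, {ID: "app", Size: 50}}, nil
}

func main() {
	hc := &historyCache{c: fakeRuntime{}, layers: map[string][]layer{}}
	for i := 0; i < 3; i++ { // three stats passes over the same image
		if _, err := hc.layersFor("busybox"); err != nil {
			panic(err)
		}
	}
	fmt.Printf("runtime queried %d time(s) for 3 passes\n", hc.calls) // prints 1
}
```

The real provider below is a bit more involved: it keys its cache by layer ID so layers shared between images are counted only once, and it rebuilds the cache on every pass so layers belonging to removed images stop being counted.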

@yujuhong @vishh 
/cc @kubernetes/sig-node 

Marking this for v1.3 because it fixes a relatively significant performance regression.

k8s-merge-robot committed May 30, 2016
2 parents fe5edcf + 52a3d8a commit 77de942
Showing 4 changed files with 300 additions and 34 deletions.
18 changes: 11 additions & 7 deletions pkg/kubelet/dockertools/fake_docker_client.go
@@ -30,7 +30,6 @@ import (
dockercontainer "github.com/docker/engine-api/types/container"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/util/sets"
)

// FakeDockerClient is a simple fake docker client, so that kubelet can be run for testing without requiring a real docker setup.
@@ -49,7 +48,6 @@ type FakeDockerClient struct {
Created []string
Stopped []string
Removed []string
RemovedImages sets.String
VersionInfo dockertypes.Version
Information dockertypes.Info
ExecInspect *dockertypes.ContainerExecInspect
@@ -68,10 +66,9 @@ func NewFakeDockerClient() *FakeDockerClient {

func NewFakeDockerClientWithVersion(version, apiVersion string) *FakeDockerClient {
return &FakeDockerClient{
VersionInfo: dockertypes.Version{Version: version, APIVersion: apiVersion},
Errors: make(map[string]error),
RemovedImages: sets.String{},
ContainerMap: make(map[string]*dockertypes.ContainerJSON),
VersionInfo: dockertypes.Version{Version: version, APIVersion: apiVersion},
Errors: make(map[string]error),
ContainerMap: make(map[string]*dockertypes.ContainerJSON),
}
}

@@ -471,14 +468,20 @@ func (f *FakeDockerClient) InspectExec(id string) (*dockertypes.ContainerExecIns
}

func (f *FakeDockerClient) ListImages(opts dockertypes.ImageListOptions) ([]dockertypes.Image, error) {
f.called = append(f.called, "list_images")
err := f.popError("list_images")
return f.Images, err
}

func (f *FakeDockerClient) RemoveImage(image string, opts dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDelete, error) {
err := f.popError("remove_image")
if err == nil {
f.RemovedImages.Insert(image)
for i := range f.Images {
if f.Images[i].ID == image {
f.Images = append(f.Images[:i], f.Images[i+1:]...)
break
}
}
}
return []dockertypes.ImageDelete{{Deleted: image}}, err
}
@@ -538,6 +541,7 @@ func (f *FakeDockerPuller) IsImagePresent(name string) (bool, error) {
func (f *FakeDockerClient) ImageHistory(id string) ([]dockertypes.ImageHistory, error) {
f.Lock()
defer f.Unlock()
f.called = append(f.called, "image_history")
history := f.ImageHistoryMap[id]
return history, nil
}
79 changes: 55 additions & 24 deletions pkg/kubelet/dockertools/images.go
@@ -18,54 +18,85 @@ package dockertools

import (
"fmt"
"sync"

"github.com/golang/glog"

dockertypes "github.com/docker/engine-api/types"
runtime "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/util/sets"
)

// imageStatsProvider exposes stats about all images currently available.
type imageStatsProvider struct {
sync.Mutex
// layers caches the current layers, key is the layer ID.
layers map[string]*dockertypes.ImageHistory
// imageToLayerIDs maps image to its layer IDs.
imageToLayerIDs map[string][]string
// Docker remote API client
c DockerInterface
}

func newImageStatsProvider(c DockerInterface) *imageStatsProvider {
return &imageStatsProvider{
layers: make(map[string]*dockertypes.ImageHistory),
imageToLayerIDs: make(map[string][]string),
c: c,
}
}

func (isp *imageStatsProvider) ImageStats() (*runtime.ImageStats, error) {
images, err := isp.c.ListImages(dockertypes.ImageListOptions{})
if err != nil {
return nil, fmt.Errorf("failed to list docker images - %v", err)
}
// A map of all the image layers to its corresponding size.
imageMap := sets.NewString()
ret := &runtime.ImageStats{}
// Take the lock to protect the cache
isp.Lock()
defer isp.Unlock()
// Create new cache each time, this is a little more memory consuming, but:
// * ImageStats is only called every 10 seconds
// * We use pointers and reference to copy cache elements.
// The memory usage should be acceptable.
// TODO(random-liu): Add more logic to implement in place cache update.
newLayers := make(map[string]*dockertypes.ImageHistory)
newImageToLayerIDs := make(map[string][]string)
for _, image := range images {
// Get information about the various layers of each docker image.
history, err := isp.c.ImageHistory(image.ID)
if err != nil {
glog.V(2).Infof("failed to get history of docker image %v - %v", image, err)
continue
}
// Store size information of each layer.
for _, layer := range history {
// Skip empty layers.
if layer.Size == 0 {
glog.V(10).Infof("skipping image layer %v with size 0", layer)
layerIDs, ok := isp.imageToLayerIDs[image.ID]
if !ok {
// Get information about the various layers of the given docker image.
history, err := isp.c.ImageHistory(image.ID)
if err != nil {
// Skip the image and inspect again in next ImageStats if the image is still there
glog.V(2).Infof("failed to get history of docker image %+v - %v", image, err)
continue
}
key := layer.ID
// Some of the layers are empty.
// We are hoping that these layers are unique to each image.
// Still keying with the CreatedBy field to be safe.
if key == "" || key == "<missing>" {
key = key + layer.CreatedBy
// Cache each layer
for i := range history {
layer := &history[i]
key := layer.ID
// Some of the layers are empty.
// We are hoping that these layers are unique to each image.
// Still keying with the CreatedBy field to be safe.
if key == "" || key == "<missing>" {
key = key + layer.CreatedBy
}
layerIDs = append(layerIDs, key)
newLayers[key] = layer
}
if !imageMap.Has(key) {
ret.TotalStorageBytes += uint64(layer.Size)
} else {
for _, layerID := range layerIDs {
newLayers[layerID] = isp.layers[layerID]
}
imageMap.Insert(key)
}
newImageToLayerIDs[image.ID] = layerIDs
}
ret := &runtime.ImageStats{}
// Calculate the total storage bytes
for _, layer := range newLayers {
ret.TotalStorageBytes += uint64(layer.Size)
}
// Update current cache
isp.layers = newLayers
isp.imageToLayerIDs = newImageToLayerIDs
return ret, nil
}
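
The unit test added by this PR is not shown in this view. A sketch of how the new cache could be exercised with `FakeDockerClient` might look like the following (an illustrative test in package `dockertools`, not the actual test from the PR; the real assertions may differ):

```go
package dockertools

import (
	"testing"

	dockertypes "github.com/docker/engine-api/types"
)

// TestImageStatsCachesHistory changes the history reported by the fake client
// between two ImageStats calls; because the image ID is already cached in
// imageToLayerIDs, the second call must not re-query ImageHistory, so the
// total stays at the originally cached value.
func TestImageStatsCachesHistory(t *testing.T) {
	fakeClient := NewFakeDockerClient()
	fakeClient.Images = []dockertypes.Image{{ID: "busybox"}}
	fakeClient.ImageHistoryMap = map[string][]dockertypes.ImageHistory{
		"busybox": {{ID: "layer1", Size: 100}, {ID: "layer2", Size: 200}},
	}
	provider := newImageStatsProvider(fakeClient)

	stats, err := provider.ImageStats()
	if err != nil || stats.TotalStorageBytes != 300 {
		t.Fatalf("first ImageStats() = %+v, %v; want TotalStorageBytes = 300", stats, err)
	}

	// Change what the runtime would report; a cached provider should not notice,
	// because it only calls ImageHistory for images it has not seen before.
	fakeClient.ImageHistoryMap["busybox"] = []dockertypes.ImageHistory{{ID: "layer1", Size: 1}}

	stats, err = provider.ImageStats()
	if err != nil || stats.TotalStorageBytes != 300 {
		t.Errorf("second ImageStats() = %+v, %v; want cached TotalStorageBytes = 300", stats, err)
	}
}
```

Since `ImageStats` only issues `ImageHistory` calls for image IDs missing from `imageToLayerIDs`, and it re-lists images on every pass, removed images naturally age out of the cache while unchanged images never trigger another history lookup.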
