kubectl rolling-update support for same image
j3ffml committed Apr 25, 2016
1 parent e73606b commit 1baa473
Showing 8 changed files with 63 additions and 19 deletions.
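
This change lets kubectl rolling-update roll a replication controller to the same image (for example, to force a re-pull of a moving tag), provided an explicit pull policy is supplied via the new --image-pull-policy flag. A rough sketch of the intended usage, with a hypothetical controller name and image:

# Re-deploy the currently running image:tag; --image-pull-policy is required because the image is unchanged.
kubectl rolling-update frontend --image=registry.example.com/frontend:v1 --image-pull-policy=Always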
1 change: 1 addition & 0 deletions contrib/completions/bash/kubectl
@@ -1668,6 +1668,7 @@ _kubectl_rolling-update()
flags_with_completion+=("-f")
flags_completion+=("__handle_filename_extension_flag json|yaml|yml")
flags+=("--image=")
flags+=("--image-pull-policy=")
flags+=("--include-extended-apis")
flags+=("--no-headers")
flags+=("--output=")
4 changes: 4 additions & 0 deletions docs/man/man1/kubectl-rolling-update.1
@@ -42,6 +42,10 @@ existing replication controller and overwrite at least one (common) label in its
\fB\-\-image\fP=""
Image to use for upgrading the replication controller. Must be distinct from the existing image (either new image or new image tag). Can not be used with \-\-filename/\-f

+ .PP
+ \fB\-\-image\-pull\-policy\fP=""
+ Explicit policy for when to pull container images. Required when \-\-image is same as existing image, ignored otherwise.
+
.PP
\fB\-\-include\-extended\-apis\fP=true
If true, include definitions of new APIs via calls to the API server. [default true]
3 changes: 2 additions & 1 deletion docs/user-guide/kubectl/kubectl_rolling-update.md
@@ -78,6 +78,7 @@ kubectl rolling-update frontend-v1 frontend-v2 --rollback
--dry-run[=false]: If true, print out the changes that would be made, but don't actually make them.
-f, --filename=[]: Filename or URL to file to use to create the new replication controller.
--image="": Image to use for upgrading the replication controller. Must be distinct from the existing image (either new image or new image tag). Can not be used with --filename/-f
--image-pull-policy="": Explicit policy for when to pull container images. Required when --image is same as existing image, ignored otherwise.
--include-extended-apis[=true]: If true, include definitions of new APIs via calls to the API server. [default true]
--no-headers[=false]: When using the default output, don't print headers.
-o, --output="": Output format. One of: json|yaml|wide|name|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=... See golang template [http://golang.org/pkg/text/template/#pkg-overview] and jsonpath template [http://releases.k8s.io/HEAD/docs/user-guide/jsonpath.md].
@@ -126,7 +127,7 @@ kubectl rolling-update frontend-v1 frontend-v2 --rollback

* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager

- ###### Auto generated by spf13/cobra on 5-Apr-2016
+ ###### Auto generated by spf13/cobra on 21-Apr-2016

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_rolling-update.md?pixel)]()
3 changes: 3 additions & 0 deletions docs/yaml/kubectl/kubectl_rolling-update.yaml
@@ -26,6 +26,9 @@ options:
- name: image
usage: |
Image to use for upgrading the replication controller. Must be distinct from the existing image (either new image or new image tag). Can not be used with --filename/-f
+ - name: image-pull-policy
+ usage: |
+ Explicit policy for when to pull container images. Required when --image is same as existing image, ignored otherwise.
- name: include-extended-apis
default_value: "true"
usage: |
1 change: 1 addition & 0 deletions hack/verify-flags/known-flags.txt
@@ -164,6 +164,7 @@ ignore-daemonsets
ignore-not-found
image-gc-high-threshold
image-gc-low-threshold
+ image-pull-policy
include-extended-apis
input-dirs
insecure-bind-address
23 changes: 19 additions & 4 deletions pkg/kubectl/cmd/rollingupdate.go
@@ -97,6 +97,7 @@ func NewCmdRollingUpdate(f *cmdutil.Factory, out io.Writer) *cobra.Command {
cmd.MarkFlagRequired("image")
cmd.Flags().String("deployment-label-key", "deployment", "The key to use to differentiate between two different controllers, default 'deployment'. Only relevant when --image is specified, ignored otherwise")
cmd.Flags().String("container", "", "Container name which will have its image upgraded. Only relevant when --image is specified, ignored otherwise. Required when using --image on a multi-container pod")
cmd.Flags().String("image-pull-policy", "", "Explicit policy for when to pull container images. Required when --image is same as existing image, ignored otherwise.")
cmd.Flags().Bool("dry-run", false, "If true, print out the changes that would be made, but don't actually make them.")
cmd.Flags().Bool("rollback", false, "If true, this is a request to abort an existing rollout that is partially rolled out. It effectively reverses current and next and runs a rollout")
cmdutil.AddValidateFlags(cmd)
@@ -150,6 +151,7 @@ func RunRollingUpdate(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, arg
deploymentKey := cmdutil.GetFlagString(cmd, "deployment-label-key")
filename := ""
image := cmdutil.GetFlagString(cmd, "image")
+ pullPolicy := cmdutil.GetFlagString(cmd, "image-pull-policy")
oldName := args[0]
rollback := cmdutil.GetFlagBool(cmd, "rollback")
period := cmdutil.GetFlagDuration(cmd, "update-period")
@@ -233,8 +235,8 @@ func RunRollingUpdate(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, arg
}
}
// If the --image option is specified, we need to create a new rc with at least one different selector
- // than the old rc. This selector is the hash of the rc, which will differ because the new rc has a
- // different image.
+ // than the old rc. This selector is the hash of the rc, with a suffix to provide uniqueness for
+ // same-image updates.
if len(image) != 0 {
codec := api.Codecs.LegacyCodec(client.APIVersion())
keepOldName = len(args) == 1
@@ -248,10 +250,21 @@ func RunRollingUpdate(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, arg
}
fmt.Fprintf(out, "Found existing update in progress (%s), resuming.\n", newRc.Name)
} else {
config := &kubectl.NewControllerConfig{
Namespace: cmdNamespace,
OldName: oldName,
NewName: newName,
Image: image,
Container: container,
DeploymentKey: deploymentKey,
}
if oldRc.Spec.Template.Spec.Containers[0].Image == image {
return cmdutil.UsageError(cmd, "Specified --image must be distinct from existing container image")
if len(pullPolicy) == 0 {
return cmdutil.UsageError(cmd, "--image-pull-policy (Always|Never|IfNotPresent) must be provided when --image is the same as existing container image")
}
config.PullPolicy = api.PullPolicy(pullPolicy)
}
newRc, err = kubectl.CreateNewControllerFromCurrentController(client, codec, cmdNamespace, oldName, newName, image, container, deploymentKey)
newRc, err = kubectl.CreateNewControllerFromCurrentController(client, codec, config)
if err != nil {
return err
}
@@ -262,6 +275,8 @@ func RunRollingUpdate(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, arg
if err != nil {
return err
}
+ // If new image is same as old, the hash may not be distinct, so add a suffix.
+ oldHash += "-orig"
oldRc, err = kubectl.UpdateExistingReplicationController(client, oldRc, cmdNamespace, newRc.Name, deploymentKey, oldHash, out)
if err != nil {
return err
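
Per the validation above, a same-image update that omits the new flag is still rejected with a usage error; supplying any of the three pull policies lets it proceed. A sketch of both cases, assuming an existing "frontend" controller already running registry.example.com/frontend:v1:

# Rejected: the requested image matches the running container and no pull policy was given.
kubectl rolling-update frontend --image=registry.example.com/frontend:v1
# Accepted: an explicit policy (Always|Never|IfNotPresent) makes the same-image update unambiguous.
kubectl rolling-update frontend --image=registry.example.com/frontend:v1 --image-pull-policy=IfNotPresent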
36 changes: 24 additions & 12 deletions pkg/kubectl/rolling_updater.go
@@ -504,51 +504,63 @@ func LoadExistingNextReplicationController(c client.ReplicationControllersNamesp
return newRc, err
}

- func CreateNewControllerFromCurrentController(c client.Interface, codec runtime.Codec, namespace, oldName, newName, image, container, deploymentKey string) (*api.ReplicationController, error) {
+ type NewControllerConfig struct {
+ Namespace string
+ OldName, NewName string
+ Image string
+ Container string
+ DeploymentKey string
+ PullPolicy api.PullPolicy
+ }
+
+ func CreateNewControllerFromCurrentController(c client.Interface, codec runtime.Codec, cfg *NewControllerConfig) (*api.ReplicationController, error) {
containerIndex := 0
// load the old RC into the "new" RC
- newRc, err := c.ReplicationControllers(namespace).Get(oldName)
+ newRc, err := c.ReplicationControllers(cfg.Namespace).Get(cfg.OldName)
if err != nil {
return nil, err
}

- if len(container) != 0 {
+ if len(cfg.Container) != 0 {
containerFound := false

for i, c := range newRc.Spec.Template.Spec.Containers {
- if c.Name == container {
+ if c.Name == cfg.Container {
containerIndex = i
containerFound = true
break
}
}

if !containerFound {
return nil, fmt.Errorf("container %s not found in pod", container)
return nil, fmt.Errorf("container %s not found in pod", cfg.Container)
}
}

if len(newRc.Spec.Template.Spec.Containers) > 1 && len(container) == 0 {
if len(newRc.Spec.Template.Spec.Containers) > 1 && len(cfg.Container) == 0 {
return nil, goerrors.New("Must specify container to update when updating a multi-container pod")
}

if len(newRc.Spec.Template.Spec.Containers) == 0 {
return nil, goerrors.New(fmt.Sprintf("Pod has no containers! (%v)", newRc))
}
- newRc.Spec.Template.Spec.Containers[containerIndex].Image = image
+ newRc.Spec.Template.Spec.Containers[containerIndex].Image = cfg.Image
+ if len(cfg.PullPolicy) != 0 {
+ newRc.Spec.Template.Spec.Containers[containerIndex].ImagePullPolicy = cfg.PullPolicy
+ }

newHash, err := api.HashObject(newRc, codec)
if err != nil {
return nil, err
}

- if len(newName) == 0 {
- newName = fmt.Sprintf("%s-%s", newRc.Name, newHash)
+ if len(cfg.NewName) == 0 {
+ cfg.NewName = fmt.Sprintf("%s-%s", newRc.Name, newHash)
}
- newRc.Name = newName
+ newRc.Name = cfg.NewName

- newRc.Spec.Selector[deploymentKey] = newHash
- newRc.Spec.Template.Labels[deploymentKey] = newHash
+ newRc.Spec.Selector[cfg.DeploymentKey] = newHash
+ newRc.Spec.Template.Labels[cfg.DeploymentKey] = newHash
// Clear resource version after hashing so that identical updates get different hashes.
newRc.ResourceVersion = ""
return newRc, nil
11 changes: 9 additions & 2 deletions pkg/kubectl/rolling_updater_test.go
@@ -1090,12 +1090,19 @@ func TestRollingUpdater_multipleContainersInPod(t *testing.T) {
test.newRc.Spec.Template.Labels[test.deploymentKey] = deploymentHash
test.newRc.Name = fmt.Sprintf("%s-%s", test.newRc.Name, deploymentHash)

- updatedRc, err := CreateNewControllerFromCurrentController(fake, codec, "", test.oldRc.ObjectMeta.Name, test.newRc.ObjectMeta.Name, test.image, test.container, test.deploymentKey)
+ config := &NewControllerConfig{
+ OldName: test.oldRc.ObjectMeta.Name,
+ NewName: test.newRc.ObjectMeta.Name,
+ Image: test.image,
+ Container: test.container,
+ DeploymentKey: test.deploymentKey,
+ }
+ updatedRc, err := CreateNewControllerFromCurrentController(fake, codec, config)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !reflect.DeepEqual(updatedRc, test.newRc) {
t.Errorf("expected:\n%v\ngot:\n%v\n", test.newRc, updatedRc)
t.Errorf("expected:\n%#v\ngot:\n%#v\n", test.newRc, updatedRc)
}
}
}

1 comment on commit 1baa473

@k8s-teamcity-mesosphere

TeamCity OSS :: Kubernetes Mesos :: 4 - Smoke Tests Build 22523 outcome was FAILURE
Summary: Tests failed: 1, passed: 0, ignored: 277 Build time: 00:09:52

Failed tests

null: Kubernetes e2e suite.[k8s.io] Kubectl client [k8s.io] Guestbook application should create and stop a working application [Conformance]: <no details avaliable>
