diff --git a/hack/make-rules/test-cmd-util.sh b/hack/make-rules/test-cmd-util.sh
index 9577f390aa3a3..840484d4481f3 100644
--- a/hack/make-rules/test-cmd-util.sh
+++ b/hack/make-rules/test-cmd-util.sh
@@ -44,6 +44,8 @@ IMAGE_PERL="gcr.io/google-containers/perl"
 IMAGE_DAEMONSET_R1="gcr.io/google-containers/pause:2.0"
 IMAGE_DAEMONSET_R2="gcr.io/google-containers/pause:latest"
 IMAGE_DAEMONSET_R2_2="gcr.io/google-containers/nginx:test-cmd"  # rollingupdate-daemonset-rv2.yaml
+IMAGE_STATEFULSET_R1="gcr.io/google-containers/pause:latest"
+IMAGE_STATEFULSET_R2="gcr.io/google-containers/nginx:test-cmd"
 
 # Expose kubectl directly for readability
 PATH="${KUBE_OUTPUT_HOSTBIN}":$PATH
@@ -3016,32 +3018,32 @@ run_daemonset_history_tests() {
   kube::test::get_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-daemonset.yaml --record.*"
   # Rollback to revision 1 - should be no-op
   kubectl rollout undo daemonset --to-revision=1 "${kube_flags[@]}"
-  kube::test::get_object_assert daemonset "{{range.items}}{{$daemonset_image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R1}:"
+  kube::test::get_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R1}:"
   kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "1"
   # Update the DaemonSet (revision 2)
   kubectl apply -f hack/testdata/rollingupdate-daemonset-rv2.yaml --record "${kube_flags[@]}"
-  kube::test::wait_object_assert daemonset "{{range.items}}{{$daemonset_image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
-  kube::test::wait_object_assert daemonset "{{range.items}}{{$daemonset_image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
+  kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
+  kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
   kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "2"
   kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-daemonset-rv2.yaml --record.*"
   # Rollback to revision 1 with dry-run - should be no-op
   kubectl rollout undo daemonset --dry-run=true "${kube_flags[@]}"
-  kube::test::get_object_assert daemonset "{{range.items}}{{$daemonset_image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
-  kube::test::get_object_assert daemonset "{{range.items}}{{$daemonset_image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
+  kube::test::get_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
+  kube::test::get_object_assert daemonset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
   kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "2"
   # Rollback to revision 1
   kubectl rollout undo daemonset --to-revision=1 "${kube_flags[@]}"
-  kube::test::wait_object_assert daemonset "{{range.items}}{{$daemonset_image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R1}:"
+  kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R1}:"
   kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "1"
   # Rollback to revision 1000000 - should fail
   output_message=$(! kubectl rollout undo daemonset --to-revision=1000000 "${kube_flags[@]}" 2>&1)
   kube::test::if_has_string "${output_message}" "unable to find specified revision"
-  kube::test::get_object_assert daemonset "{{range.items}}{{$daemonset_image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R1}:"
+  kube::test::get_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R1}:"
   kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "1"
   # Rollback to last revision
   kubectl rollout undo daemonset "${kube_flags[@]}"
-  kube::test::wait_object_assert daemonset "{{range.items}}{{$daemonset_image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
-  kube::test::wait_object_assert daemonset "{{range.items}}{{$daemonset_image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
+  kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
+  kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
   kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "2"
   # Clean up
   kubectl delete -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
@@ -3061,47 +3063,41 @@ run_statefulset_history_tests() {
   kube::test::get_object_assert statefulset "{{range.items}}{{$id_field}}:{{end}}" ''
   # Command: create a StatefulSet (revision 1)
-  kubectl create -f hack/testdata/rollingupdate-statefulset.yaml --record "${kube_flags[@]}"
-  kube::test::get_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-statefulset.yaml --record.*"
-
+  kubectl apply -f hack/testdata/rollingupdate-statefulset.yaml --record "${kube_flags[@]}"
+
   # Rollback to revision 1 - should be no-op
   kubectl rollout undo statefulset --to-revision=1 "${kube_flags[@]}"
-  kube::test::get_object_assert statefulset "{{range.items}}{{$statefulset_image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
+  kube::test::get_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
   kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1"
   # Update the statefulset (revision 2)
   kubectl apply -f hack/testdata/rollingupdate-statefulset-rv2.yaml --record "${kube_flags[@]}"
-  kube::test::wait_object_assert statefulset "{{range.items}}{{$statefulset_image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
-  kube::test::wait_object_assert statefulset "{{range.items}}{{$statefulset_image_field1}}:{{end}}" "${IMAGE_STATEFULSET_R2_2}:"
-  kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "2"
-  kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-statefulset-rv2.yaml --record.*"
+  kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
+  kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1"
   # Rollback to revision 1 with dry-run - should be no-op
   kubectl rollout undo statefulset --dry-run=true "${kube_flags[@]}"
-  kube::test::get_object_assert statefulset "{{range.items}}{{$statefulset_image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
-  kube::test::get_object_assert statefulset "{{range.items}}{{$statefulset_image_field1}}:{{end}}" "${IMAGE_STATEFULSET_R2_2}:"
-  kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "2"
"${IMAGE_STATEFULSET_R2}:" + kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1" # Rollback to revision 1 kubectl rollout undo statefulset --to-revision=1 "${kube_flags[@]}" - kube::test::wait_object_assert statefulset "{{range.items}}{{$statefulset_image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:" + kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:" kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1" # Rollback to revision 1000000 - should fail output_message=$(! kubectl rollout undo statefulset --to-revision=1000000 "${kube_flags[@]}" 2>&1) kube::test::if_has_string "${output_message}" "unable to find specified revision" - kube::test::get_object_assert statefulset "{{range.items}}{{$statefulset_image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:" + kube::test::get_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:" kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1" # Rollback to last revision kubectl rollout undo statefulset "${kube_flags[@]}" - kube::test::wait_object_assert statefulset "{{range.items}}{{$statefulset_image_field0}}:{{end}}" "${IMAGE_statefulset_R2}:" - kube::test::wait_object_assert statefulset "{{range.items}}{{$statefulset_image_field1}}:{{end}}" "${IMAGE_statefulset_R2_2}:" - kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "2" + kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R2}:" + kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1" - # Clean up + # Clean up - delete either one suffices kubectl delete -f hack/testdata/rollingupdate-statefulset.yaml "${kube_flags[@]}" - kubectl delete -f hack/testdata/rollingupdate-statefulset-rv2.yaml "${kube_flags[@]}" set +o nounset set +o errexit @@ -4241,11 +4237,9 @@ runTests() { pdb_max_unavailable=".spec.maxUnavailable" template_generation_field=".spec.templateGeneration" container_len="(len .spec.template.spec.containers)" - daemonset_image_field0="(index .spec.template.spec.containers 0).image" - daemonset_image_field1="(index .spec.template.spec.containers 1).image" - statefulset_image_field0="(index .spec.template.spec.containers 0).image" - statefulset_image_field1="(index .spec.template.spec.containers 1).image" - + image_field0="(index .spec.template.spec.containers 0).image" + image_field1="(index .spec.template.spec.containers 1).image" + # Make sure "default" namespace exists. 
   if kube::test::if_supports_resource "${namespaces}" ; then
     output_message=$(kubectl get "${kube_flags[@]}" namespaces)
@@ -4263,6 +4257,232 @@ runTests() {
     kubectl get "${kube_flags[@]}" -f hack/testdata/kubernetes-service.yaml
   fi
 
+  #########################
+  # Kubectl version #
+  #########################
+
+  record_command run_kubectl_version_tests
+
+  #######################
+  # kubectl config set #
+  #######################
+
+  record_command run_kubectl_config_set_tests
+
+  #######################
+  # kubectl local proxy #
+  #######################
+
+  record_command run_kubectl_local_proxy_tests
+
+  #########################
+  # RESTMapper evaluation #
+  #########################
+
+  record_command run_RESTMapper_evaluation_tests
+
+  ################
+  # Cluster Role #
+  ################
+
+  if kube::test::if_supports_resource "${clusterroles}" ; then
+    record_command run_clusterroles_tests
+  fi
+
+  ########
+  # Role #
+  ########
+  if kube::test::if_supports_resource "${roles}" ; then
+    record_command run_role_tests
+  fi
+
+  #########################
+  # Assert short name #
+  #########################
+
+  record_command run_assert_short_name_tests
+
+  #########################
+  # Assert categories #
+  #########################
+
+  ## test if a category is exported during discovery
+  if kube::test::if_supports_resource "${pods}" ; then
+    record_command run_assert_categories_tests
+  fi
+
+  ###########################
+  # POD creation / deletion #
+  ###########################
+
+  if kube::test::if_supports_resource "${pods}" ; then
+    record_command run_pod_tests
+  fi
+
+  if kube::test::if_supports_resource "${pods}" ; then
+    record_command run_save_config_tests
+  fi
+
+  if kube::test::if_supports_resource "${pods}" ; then
+    record_command run_kubectl_create_error_tests
+  fi
+
+  if kube::test::if_supports_resource "${pods}" ; then
+    # TODO: Move apply tests to run on rs instead of pods so that they can be
+    # run for federation apiserver as well.
+    record_command run_kubectl_apply_tests
+    record_command run_kubectl_run_tests
+    record_command run_kubectl_using_deprecated_commands_test
+    record_command run_kubectl_create_filter_tests
+  fi
+
+  if kube::test::if_supports_resource "${deployments}" ; then
+    record_command run_kubectl_apply_deployments_tests
+  fi
+
+  ###############
+  # Kubectl get #
+  ###############
+
+  if kube::test::if_supports_resource "${pods}" ; then
+    # TODO: Move get tests to run on rs instead of pods so that they can be
+    # run for federation apiserver as well.
+    record_command run_kubectl_get_tests
+  fi
+
+  ##################
+  # Global timeout #
+  ##################
+
+  if kube::test::if_supports_resource "${pods}" ; then
+    # TODO: Move request timeout tests to run on rs instead of pods so that they
+    # can be run for federation apiserver as well.
+    record_command run_kubectl_request_timeout_tests
+  fi
+
+  #####################################
+  # Third Party Resources #
+  #####################################
+
+  # customresourcedefinitions clean up after themselves. Run these first, then TPRs
+  if kube::test::if_supports_resource "${customresourcedefinitions}" ; then
+    record_command run_crd_tests
+  fi
+
+  #################
+  # Run cmd w img #
+  #################
+
+  if kube::test::if_supports_resource "${deployments}" ; then
+    record_command run_cmd_with_img_tests
+  fi
+
+  #####################################
+  # Recursive Resources via directory #
+  #####################################
+
+  if kube::test::if_supports_resource "${pods}" ; then
+    record_command run_recursive_resources_tests
+  fi
+
+  ##############
+  # Namespaces #
+  ##############
+  if kube::test::if_supports_resource "${namespaces}" ; then
+    record_command run_namespace_tests
+  fi
+
+  ###########
+  # Secrets #
+  ###########
+  if kube::test::if_supports_resource "${namespaces}" ; then
+    if kube::test::if_supports_resource "${secrets}" ; then
+      record_command run_secrets_test
+    fi
+  fi
+
+  ######################
+  # ConfigMap #
+  ######################
+
+  if kube::test::if_supports_resource "${namespaces}"; then
+    if kube::test::if_supports_resource "${configmaps}" ; then
+      record_command run_configmap_tests
+    fi
+  fi
+
+  ####################
+  # Client Config #
+  ####################
+
+  record_command run_client_config_tests
+
+  ####################
+  # Service Accounts #
+  ####################
+
+  if kube::test::if_supports_resource "${namespaces}" && kube::test::if_supports_resource "${serviceaccounts}" ; then
+    record_command run_service_accounts_tests
+  fi
+
+  #################
+  # Pod templates #
+  #################
+
+  if kube::test::if_supports_resource "${podtemplates}" ; then
+    record_command run_pod_templates_tests
+  fi
+
+  ############
+  # Services #
+  ############
+
+  if kube::test::if_supports_resource "${services}" ; then
+    record_command run_service_tests
+  fi
+
+  ##################
+  # DaemonSets #
+  ##################
+
+  if kube::test::if_supports_resource "${daemonsets}" ; then
+    record_command run_daemonset_tests
+    if kube::test::if_supports_resource "${controllerrevisions}"; then
+      record_command run_daemonset_history_tests
+    fi
+  fi
+
+  ###########################
+  # Replication controllers #
+  ###########################
+
+  if kube::test::if_supports_resource "${namespaces}" ; then
+    if kube::test::if_supports_resource "${replicationcontrollers}" ; then
+      record_command run_rc_tests
+    fi
+  fi
+
+  ######################
+  # Deployments #
+  ######################
+
+  if kube::test::if_supports_resource "${deployments}" ; then
+    record_command run_deployment_tests
+  fi
+
+  ######################
+  # Replica Sets #
+  ######################
+
+  if kube::test::if_supports_resource "${replicasets}" ; then
+    record_command run_rs_tests
+  fi
+
   #################
   # Stateful Sets #
   #################
@@ -4274,6 +4494,160 @@ runTests() {
     fi
   fi
 
+  ######################
+  # Lists #
+  ######################
+
+  if kube::test::if_supports_resource "${services}" ; then
+    if kube::test::if_supports_resource "${deployments}" ; then
+      record_command run_lists_tests
+    fi
+  fi
+
+  ######################
+  # Multiple Resources #
+  ######################
+  if kube::test::if_supports_resource "${services}" ; then
+    if kube::test::if_supports_resource "${replicationcontrollers}" ; then
+      record_command run_multi_resources_tests
+    fi
+  fi
+
+  ######################
+  # Persistent Volumes #
+  ######################
+
+  if kube::test::if_supports_resource "${persistentvolumes}" ; then
+    record_command run_persistent_volumes_tests
+  fi
+
+  ############################
+  # Persistent Volume Claims #
+  ############################
+
+  if kube::test::if_supports_resource "${persistentvolumeclaims}" ; then
+    record_command run_persistent_volume_claims_tests
+  fi
+
+  ############################
+  # Storage Classes #
+  ############################
+
+  if kube::test::if_supports_resource "${storageclass}" ; then
+    record_command run_storage_class_tests
+  fi
+
+  #########
+  # Nodes #
+  #########
+
+  if kube::test::if_supports_resource "${nodes}" ; then
+    record_command run_nodes_tests
+  fi
+
+  ########################
+  # authorization.k8s.io #
+  ########################
+
+  if kube::test::if_supports_resource "${subjectaccessreviews}" ; then
+    record_command run_authorization_tests
+  fi
+
+  # kubectl auth can-i
+  # kube-apiserver is started with authorization mode AlwaysAllow, so kubectl can-i always returns yes
+  if kube::test::if_supports_resource "${subjectaccessreviews}" ; then
+    output_message=$(kubectl auth can-i '*' '*' 2>&1 "${kube_flags[@]}")
+    kube::test::if_has_string "${output_message}" "yes"
+
+    output_message=$(kubectl auth can-i get pods --subresource=log 2>&1 "${kube_flags[@]}")
+    kube::test::if_has_string "${output_message}" "yes"
+
+    output_message=$(kubectl auth can-i get invalid_resource 2>&1 "${kube_flags[@]}")
+    kube::test::if_has_string "${output_message}" "the server doesn't have a resource type"
+
+    output_message=$(kubectl auth can-i get /logs/ 2>&1 "${kube_flags[@]}")
+    kube::test::if_has_string "${output_message}" "yes"
+
+    output_message=$(! kubectl auth can-i get /logs/ --subresource=log 2>&1 "${kube_flags[@]}")
+    kube::test::if_has_string "${output_message}" "subresource can not be used with nonResourceURL"
+
+    output_message=$(kubectl auth can-i list jobs.batch/bar -n foo --quiet 2>&1 "${kube_flags[@]}")
+    kube::test::if_empty_string "${output_message}"
+  fi
+
+  #####################
+  # Retrieve multiple #
+  #####################
+
+  if kube::test::if_supports_resource "${nodes}" ; then
+    if kube::test::if_supports_resource "${services}" ; then
+      record_command run_retrieve_multiple_tests
+    fi
+  fi
+
+  #####################
+  # Resource aliasing #
+  #####################
+
+  if kube::test::if_supports_resource "${services}" ; then
+    if kube::test::if_supports_resource "${replicationcontrollers}" ; then
+      record_command run_resource_aliasing_tests
+    fi
+  fi
+
+  ###########
+  # Explain #
+  ###########
+
+  if kube::test::if_supports_resource "${pods}" ; then
+    record_command run_kubectl_explain_tests
+  fi
+
+  ###########
+  # Swagger #
+  ###########
+
+  record_command run_swagger_tests
+
+  #####################
+  # Kubectl --sort-by #
+  #####################
+
+  if kube::test::if_supports_resource "${pods}" ; then
+    record_command run_kubectl_sort_by_tests
+  fi
+
+  ############################
+  # Kubectl --all-namespaces #
+  ############################
+
+  if kube::test::if_supports_resource "${pods}" ; then
+    record_command run_kubectl_all_namespace_tests
+  fi
+
+  ################
+  # Certificates #
+  ################
+
+  if kube::test::if_supports_resource "${csr}" ; then
+    record_command run_certificates_tests
+  fi
+
+  ###########
+  # Plugins #
+  ###########
+
+  record_command run_plugins_tests
+
+  #################
+  # Impersonation #
+  #################
+  record_command run_impersonation_tests
+
   kube::test::clear_all
 
   if [ "$foundError" == "True" ]; then
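Note: the consolidated image_field0/image_field1 variables above are plain Go-template index expressions, so the assertions they drive can be reproduced outside the harness. A minimal standalone sketch, assuming a reachable test cluster with a DaemonSet present (the expected/actual variables are illustrative, not part of the harness):

  # Render the first container image of every DaemonSet, mirroring what
  # kube::test::get_object_assert does with these template fragments.
  image_field0="(index .spec.template.spec.containers 0).image"
  expected="gcr.io/google-containers/pause:2.0:"
  actual=$(kubectl get daemonset -o go-template="{{range .items}}{{${image_field0}}}:{{end}}")
  if [[ "${actual}" =~ ${expected} ]]; then
    echo "assertion passed: ${actual}"
  else
    echo "assertion failed: got ${actual}, want ${expected}" >&2
  fi
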
diff --git a/hack/testdata/rollingupdate-statefulset-rv2.yaml b/hack/testdata/rollingupdate-statefulset-rv2.yaml
index ee5dd10ca2dc1..e75fc26b7f639 100644
--- a/hack/testdata/rollingupdate-statefulset-rv2.yaml
+++ b/hack/testdata/rollingupdate-statefulset-rv2.yaml
@@ -28,10 +28,10 @@ spec:
     labels:
       app: nginx-statefulset
   spec:
-    terminationGracePeriodSeconds: 0
+    terminationGracePeriodSeconds: 5
     containers:
     - name: nginx
-      image: gcr.io/google_containers/nginx-slim:0.7
+      image: gcr.io/google-containers/nginx:test-cmd
       ports:
       - containerPort: 80
         name: web
@@ -39,13 +39,4 @@ spec:
       - sh
       - -c
       - 'while true; do sleep 1; done'
-    - name: nginx-rv2
-      image: gcr.io/google_containers/nginx-slim:0.8
-      ports:
-      - containerPort: 81
-        name: web-rv2
-      command:
-      - sh
-      - -c
-      - 'while true; do sleep 1; done'
\ No newline at end of file
diff --git a/hack/testdata/rollingupdate-statefulset.yaml b/hack/testdata/rollingupdate-statefulset.yaml
index 565ce33e8e96b..5c6f3b8ca21c2 100644
--- a/hack/testdata/rollingupdate-statefulset.yaml
+++ b/hack/testdata/rollingupdate-statefulset.yaml
@@ -28,10 +28,10 @@ spec:
     labels:
       app: nginx-statefulset
   spec:
-    terminationGracePeriodSeconds: 0
+    terminationGracePeriodSeconds: 5
     containers:
     - name: nginx
-      image: gcr.io/google_containers/nginx-slim:0.7
+      image: gcr.io/google-containers/pause:latest
       ports:
       - containerPort: 80
         name: web
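The two fixtures above now differ only in the container image and terminationGracePeriodSeconds, which is what lets them serve as revision 1 and revision 2 of the same StatefulSet. A rough sketch of the lifecycle the history test drives with them, using the same command forms as the test script (a running test cluster is assumed):

  kubectl apply -f hack/testdata/rollingupdate-statefulset.yaml --record      # revision 1: pause:latest
  kubectl apply -f hack/testdata/rollingupdate-statefulset-rv2.yaml --record  # revision 2: nginx:test-cmd
  kubectl rollout undo statefulset --to-revision=1                            # back to revision 1
  kubectl rollout undo statefulset                                            # back to revision 2
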
diff --git a/pkg/controller/statefulset/stateful_set.go b/pkg/controller/statefulset/stateful_set.go
index 073e55dfe2c03..f9838ff931038 100644
--- a/pkg/controller/statefulset/stateful_set.go
+++ b/pkg/controller/statefulset/stateful_set.go
@@ -72,13 +72,6 @@ type StatefulSetController struct {
 	setListerSynced cache.InformerSynced
 	// pvcListerSynced returns true if the pvc shared informer has synced at least once
 	pvcListerSynced cache.InformerSynced
-	/*
-	// historyLister get list/get history from the shared informers's store
-	historyLister appslisters.ControllerRevisionLister
-	// historyStoreSynced returns true if the history store has been synced at least once.
-	// Added as a member to the struct to allow injection for testing.
-	historyStoreSynced cache.InformerSynced
-	*/
 	// StatefulSets that need to be synced.
 	queue workqueue.RateLimitingInterface
 }
@@ -141,16 +134,7 @@ func NewStatefulSetController(
 )
 	ssc.setLister = setInformer.Lister()
 	ssc.setListerSynced = setInformer.Informer().HasSynced
-	/*
-	historyInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-		AddFunc:    ssc.addHistory,
-		UpdateFunc: ssc.updateHistory,
-		DeleteFunc: ssc.deleteHistory,
-	})
-
-	ssc.historyLister = historyInformer.Lister()
-	ssc.historyStoreSynced = historyInformer.Informer().HasSynced
-	*/
+
 	// TODO: Watch volumes
 	return ssc
 }
@@ -477,137 +461,3 @@ func (ssc *StatefulSetController) syncStatefulSet(set *apps.StatefulSet, pods []
 	glog.V(4).Infof("Successfully synced StatefulSet %s/%s successful", set.Namespace, set.Name)
 	return nil
 }
-
-/*
-// addHistory enqueues the StatefulSet that manages a ControllerRevision when the ControllerRevision is created
-// or when the controller manager is restarted.
-func (ssc *StatefulSetController) addHistory(obj interface{}) {
-	history := obj.(*apps.ControllerRevision)
-	if history.DeletionTimestamp != nil {
-		// On a restart of the controller manager, it's possible for an object to
-		// show up in a state that is already pending deletion.
-		ssc.deleteHistory(history)
-		return
-	}
-
-	// If it has a ControllerRef, that's all that matters.
-	if controllerRef := controller.GetControllerOf(history); controllerRef != nil {
-		ds := ssc.resolveControllerRef(history.Namespace, controllerRef)
-		if ds == nil {
-			return
-		}
-		glog.V(4).Infof("ControllerRevision %s added.", history.Name)
-		return
-	}
-
-	// Otherwise, it's an orphan. Get a list of all matching StatefulSets and sync
-	// them to see if anyone wants to adopt it.
-	statefulSets := ssc.getStatefulSetsForHistory(history)
-	if len(statefulSets) == 0 {
-		return
-	}
-	glog.V(4).Infof("Orphan ControllerRevision %s added.", history.Name)
-	for _, ds := range statefulSets {
-		ssc.enqueueStatefulSet(ds)
-	}
-}
-
-// updateHistory figures out what StatefulSet(s) manage a ControllerRevision when the ControllerRevision
-// is updated and wake them up. If the anything of the ControllerRevision have changed, we need to
-// awaken both the old and new StatefulSets.
-func (ssc *StatefulSetController) updateHistory(old, cur interface{}) {
-	curHistory := cur.(*apps.ControllerRevision)
-	oldHistory := old.(*apps.ControllerRevision)
-	if curHistory.ResourceVersion == oldHistory.ResourceVersion {
-		// Periodic resync will send update events for all known ControllerRevisions.
-		return
-	}
-
-	curControllerRef := controller.GetControllerOf(curHistory)
-	oldControllerRef := controller.GetControllerOf(oldHistory)
-	controllerRefChanged := !reflect.DeepEqual(curControllerRef, oldControllerRef)
-	if controllerRefChanged && oldControllerRef != nil {
-		// The ControllerRef was changed. Sync the old controller, if any.
-		if ds := ssc.resolveControllerRef(oldHistory.Namespace, oldControllerRef); ds != nil {
-			ssc.enqueueStatefulSet(ds)
-		}
-	}
-
-	// If it has a ControllerRef, that's all that matters.
-	if curControllerRef != nil {
-		ds := ssc.resolveControllerRef(curHistory.Namespace, curControllerRef)
-		if ds == nil {
-			return
-		}
-		glog.V(4).Infof("ControllerRevision %s updated.", curHistory.Name)
-		ssc.enqueueStatefulSet(ds)
-		return
-	}
-
-	// Otherwise, it's an orphan. If anything changed, sync matching controllers
-	// to see if anyone wants to adopt it now.
-	labelChanged := !reflect.DeepEqual(curHistory.Labels, oldHistory.Labels)
-	if labelChanged || controllerRefChanged {
-		StatefulSets := ssc.getStatefulSetsForHistory(curHistory)
-		if len(StatefulSets) == 0 {
-			return
-		}
-		glog.V(4).Infof("Orphan ControllerRevision %s updated.", curHistory.Name)
-		for _, ds := range StatefulSets {
-			ssc.enqueueStatefulSet(ds)
-		}
-	}
-}
-
-// deleteHistory enqueues the StatefulSet that manages a ControllerRevision when
-// the ControllerRevision is deleted. obj could be an *app.ControllerRevision, or
-// a DeletionFinalStateUnknown marker item.
-func (ssc *StatefulSetController) deleteHistory(obj interface{}) {
-	history, ok := obj.(*apps.ControllerRevision)
-
-	// When a delete is dropped, the relist will notice a ControllerRevision in the store not
-	// in the list, leading to the insertion of a tombstone object which contains
-	// the deleted key/value. Note that this value might be stale. If the ControllerRevision
-	// changed labels the new StatefulSet will not be woken up till the periodic resync.
-	if !ok {
-		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
-		if !ok {
-			utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
-			return
-		}
-		history, ok = tombstone.Obj.(*apps.ControllerRevision)
-		if !ok {
-			utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a ControllerRevision %#v", obj))
-			return
-		}
-	}
-
-	controllerRef := controller.GetControllerOf(history)
-	if controllerRef == nil {
-		// No controller should care about orphans being deleted.
-		return
-	}
-	ds := ssc.resolveControllerRef(history.Namespace, controllerRef)
-	if ds == nil {
-		return
-	}
-	glog.V(4).Infof("ControllerRevision %s deleted.", history.Name)
-	ssc.enqueueStatefulSet(ds)
-}
-
-// getStatefulSetsForHistory returns a list of StatefulSets that potentially
-// match a ControllerRevision.
-func (ssc *StatefulSetController) getStatefulSetsForHistory(history *apps.ControllerRevision) []*apps.StatefulSet {
-	statefulSets, err := ssc.dsLister.GetHistoryStatefulSets(history)
-	if err != nil || len(statefulSets) == 0 {
-		return nil
-	}
-	if len(statefulSets) > 1 {
-		// ControllerRef will ensure we don't do anything crazy, but more than one
-		// item in this list nevertheless constitutes user error.
-		glog.V(4).Infof("User error! more than one StatefulSets is selecting ControllerRevision %s/%s with labels: %#v",
-			history.Namespace, history.Name, history.Labels)
-	}
-	return statefulSets
-}
-*/
diff --git a/pkg/controller/statefulset/stateful_set_control_test.go b/pkg/controller/statefulset/stateful_set_control_test.go
index 3d0184968bd3f..ae91d962d5fa1 100644
--- a/pkg/controller/statefulset/stateful_set_control_test.go
+++ b/pkg/controller/statefulset/stateful_set_control_test.go
@@ -1274,7 +1274,7 @@ func TestStatefulSetControlRollback(t *testing.T) {
 		t.Fatalf("%s: %s", test.name, err)
 	}
 	history.SortControllerRevisions(revisions)
-	set, err = applyRevision(set, revisions[0])
+	set, err = ApplyRevision(set, revisions[0])
 	if err != nil {
 		t.Fatalf("%s: %s", test.name, err)
 	}
diff --git a/pkg/controller/statefulset/stateful_set_utils_test.go b/pkg/controller/statefulset/stateful_set_utils_test.go
index 58dfe05ecf602..49527f083f63e 100644
--- a/pkg/controller/statefulset/stateful_set_utils_test.go
+++ b/pkg/controller/statefulset/stateful_set_utils_test.go
@@ -280,7 +280,7 @@ func TestCreateApplyRevision(t *testing.T) {
 		t.Fatal(err)
 	}
 	set.Spec.Template.Spec.Containers[0].Name = "foo"
-	restoredSet, err := applyRevision(set, revision)
+	restoredSet, err := ApplyRevision(set, revision)
 	if err != nil {
 		t.Fatal(err)
 	}
diff --git a/staging/src/k8s.io/client-go/listers/apps/v1beta1/statefulset_expansion.go b/staging/src/k8s.io/client-go/listers/apps/v1beta1/statefulset_expansion.go
index 1a0ad254b57d6..0741792ac7a5f 100644
--- a/staging/src/k8s.io/client-go/listers/apps/v1beta1/statefulset_expansion.go
+++ b/staging/src/k8s.io/client-go/listers/apps/v1beta1/statefulset_expansion.go
@@ -21,7 +21,6 @@ import (
 	apps "k8s.io/api/apps/v1beta1"
 	"k8s.io/api/core/v1"
-	//"k8s.io/api/apps/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 )
@@ -76,39 +75,3 @@ func (s *statefulSetLister) GetPodStatefulSets(pod *v1.Pod) ([]*apps.StatefulSet
 	return psList, nil
 }
-
-/*
-// GetHistoryStatefulSets returns a list of StatefulSets that potentially
-// match a ControllerRevision. Only the one specified in the ControllerRevision's ControllerRef
-// will actually manage it.
-// Returns an error only if no matching StatefulSets are found.
-func (s *statefulSetLister) GetHistoryStatefulSets(history *apps.ControllerRevision) ([]*v1beta1.StatefulSet, error) {
-	if len(history.Labels) == 0 {
-		return nil, fmt.Errorf("no StatefulSet found for ControllerRevision %s because it has no labels", history.Name)
-	}
-
-	list, err := s.StatefulSets(history.Namespace).List(labels.Everything())
-	if err != nil {
-		return nil, err
-	}
-
-	var statefulSets []*v1beta1.StatefulSet
-	for _, ss := range list {
-		selector, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector)
-		if err != nil {
-			return nil, fmt.Errorf("invalid label selector: %v", err)
-		}
-		// If a DaemonSet with a nil or empty selector creeps in, it should match nothing, not everything.
-		if selector.Empty() || !selector.Matches(labels.Set(history.Labels)) {
-			continue
-		}
-		statefulSets = append(statefulSets, ss)
-	}
-
-	if len(statefulSets) == 0 {
-		return nil, fmt.Errorf("could not find StatefulSets for ControllerRevision %s in namespace %s with labels: %v", history.Name, history.Namespace, history.Labels)
-	}
-
-	return statefulSets, nil
-}
-*/
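Even with the controller-side history handlers disabled above, the revisions themselves are ordinary API objects, so the bookkeeping can still be inspected from the outside. A minimal sketch with stock kubectl (template output is illustrative):

  # Each recorded StatefulSet/DaemonSet revision is stored as a ControllerRevision.
  kubectl get controllerrevisions
  # Print each revision's name and number via a Go template.
  kubectl get controllerrevisions -o go-template='{{range .items}}{{.metadata.name}} {{.revision}}{{"\n"}}{{end}}'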