From f92b20888bcbecfe2c3aa58c93d0081208f09261 Mon Sep 17 00:00:00 2001 From: Abhi Shah Date: Wed, 4 Mar 2015 12:19:51 -0800 Subject: [PATCH] updated gce-pd and redis examples to use v1beta3 --- examples/gce-pd/v1beta3/testpd.yaml | 21 ++++ examples/redis/README.md | 119 +----------------- examples/redis/redis-controller.yaml | 3 +- examples/redis/redis-master-service.yaml | 10 -- examples/redis/redis-master.yaml | 6 +- examples/redis/redis-proxy.yaml | 2 +- examples/redis/redis-sentinel-controller.yaml | 3 +- examples/redis/redis-sentinel.yaml | 22 ---- examples/redis/redis-service.yaml | 9 -- examples/redis/redis-slave.yaml | 26 ---- examples/redis/v1beta3/README.md | 115 +++++++++++++++++ examples/redis/v1beta3/redis-controller.yaml | 28 +++++ examples/redis/v1beta3/redis-master.yaml | 33 +++++ examples/redis/v1beta3/redis-proxy.yaml | 14 +++ .../v1beta3/redis-sentinel-controller.yaml | 23 ++++ .../redis/v1beta3/redis-sentinel-service.yaml | 12 ++ examples/walkthrough/v1beta3/README.md | 68 ++++++++++ examples/walkthrough/v1beta3/k8s201.md | 86 +++++++++++++ .../v1beta3/pod-with-http-healthcheck.yaml | 21 ++++ examples/walkthrough/v1beta3/pod1.yaml | 8 ++ examples/walkthrough/v1beta3/pod2.yaml | 17 +++ .../v1beta3/replication-controller.yaml | 24 ++++ examples/walkthrough/v1beta3/service.yaml | 18 +++ 23 files changed, 498 insertions(+), 190 deletions(-) create mode 100644 examples/gce-pd/v1beta3/testpd.yaml delete mode 100644 examples/redis/redis-master-service.yaml delete mode 100644 examples/redis/redis-sentinel.yaml delete mode 100644 examples/redis/redis-service.yaml delete mode 100644 examples/redis/redis-slave.yaml create mode 100644 examples/redis/v1beta3/README.md create mode 100644 examples/redis/v1beta3/redis-controller.yaml create mode 100644 examples/redis/v1beta3/redis-master.yaml create mode 100644 examples/redis/v1beta3/redis-proxy.yaml create mode 100644 examples/redis/v1beta3/redis-sentinel-controller.yaml create mode 100644 examples/redis/v1beta3/redis-sentinel-service.yaml create mode 100644 examples/walkthrough/v1beta3/README.md create mode 100644 examples/walkthrough/v1beta3/k8s201.md create mode 100644 examples/walkthrough/v1beta3/pod-with-http-healthcheck.yaml create mode 100644 examples/walkthrough/v1beta3/pod1.yaml create mode 100644 examples/walkthrough/v1beta3/pod2.yaml create mode 100644 examples/walkthrough/v1beta3/replication-controller.yaml create mode 100644 examples/walkthrough/v1beta3/service.yaml diff --git a/examples/gce-pd/v1beta3/testpd.yaml b/examples/gce-pd/v1beta3/testpd.yaml new file mode 100644 index 0000000000000..6558bde9e39ba --- /dev/null +++ b/examples/gce-pd/v1beta3/testpd.yaml @@ -0,0 +1,21 @@ +apiVersion: v1beta3 +kind: Pod +metadata: + labels: + test: testpd + name: testpd +spec: + containers: + - name: testpd + image: kubernetes/pause + volumeMounts: + # name must match the volume name below + - name: testpd + # mount path within the container + mountPath: /testpd + volumes: + - name: testpd + persistentDisk: + # This GCE PD must already exist. + pdName: %insert_pd_name_here% + fsType: ext4 diff --git a/examples/redis/README.md b/examples/redis/README.md index 0dff9b7c50046..ee63e7db133f1 100644 --- a/examples/redis/README.md +++ b/examples/redis/README.md @@ -13,45 +13,8 @@ is a [_Pod_](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/ We will used the shared network namespace to bootstrap our Redis cluster. 
In particular, the very first sentinel needs to know how to find the master (subsequent sentinels just ask the first sentinel). Because all containers in a Pod share a network namespace, the sentinel can simply look at ```$(hostname -i):6379```. -Here is the config for the initial master and sentinel pod: -```yaml -id: redis-master -kind: Pod -apiVersion: v1beta1 -desiredState: - manifest: - version: v1beta1 - id: redis-master - containers: - - name: master - image: kubernetes/redis:v1 - cpu: 1000 - ports: - - name: api - containerPort: 6379 - volumeMounts: - - name: data - mountPath: /redis-master-data - env: - - key: MASTER - value: "true" - - name: sentinel - image: kubernetes/redis:v1 - ports: - - name: api - containerPort: 26379 - env: - - key: SENTINEL - value: "true" - volumes: - - name: data - source: - emptyDir: {} -labels: - name: redis - role: master - redis-sentinel: "true" -``` +Here is the config for the initial master and sentinel pod: [redis-master.yaml](redis-master.yaml) + Create this master as follows: ```sh @@ -63,20 +26,7 @@ In Kubernetes a _Service_ describes a set of Pods that perform the same task. F In Redis, we will use a Kubernetes Service to provide a discoverable endpoints for the Redis sentinels in the cluster. From the sentinels Redis clients can find the master, and then the slaves and other relevant info for the cluster. This enables new members to join the cluster when failures occur. -Here is the definition of the sentinel service: - -```yaml -id: redis-sentinel -kind: Service -apiVersion: v1beta1 -port: 26379 -containerPort: 26379 -selector: - redis-sentinel: "true" -labels: - name: sentinel - role: service -``` +Here is the definition of the sentinel service:[redis-sentinel-service.yaml](redis-sentinel-service.yaml) Create this service: ```sh @@ -89,39 +39,7 @@ So far, what we have done is pretty manual, and not very fault-tolerant. If the In Kubernetes a _Replication Controller_ is responsible for replicating sets of identical pods. Like a _Service_ it has a selector query which identifies the members of it's set. Unlike a _Service_ it also has a desired number of replicas, and it will create or delete _Pods_ to ensure that the number of _Pods_ matches up with it's desired state. Replication Controllers will "adopt" existing pods that match their selector query, so let's create a Replication Controller with a single replica to adopt our existing Redis server. - -```yaml -id: redis -kind: ReplicationController -apiVersion: v1beta1 -desiredState: - replicas: 1 - replicaSelector: - name: redis - # This template is basically identical to the single pod - # definition above - podTemplate: - desiredState: - manifest: - version: v1beta1 - id: redis - containers: - - name: redis - image: kubernetes/redis:v1 - cpu: 1000 - ports: - - name: api - containerPort: 6379 - volumeMounts: - - name: data - mountPath: /redis-master-data - volumes: - - name: data - source: - emptyDir: {} - labels: - name: redis -``` +[redis-controller.yaml](redis-controller.yaml) The bulk of this controller config is actually identical to the redis-master pod definition above. It forms the template or "cookie cutter" that defines what it means to be a member of this set. @@ -131,34 +49,7 @@ Create this controller: kubectl create -f examples/redis/redis-controller.yaml ``` -We'll do the same thing for the sentinel. 
Here is the controller config: -```yaml -id: redis-sentinel -kind: ReplicationController -apiVersion: v1beta1 -desiredState: - replicas: 1 - replicaSelector: - redis-sentinel: "true" - podTemplate: - desiredState: - manifest: - version: v1beta1 - id: redis-slave - containers: - - name: sentinel - image: kubernetes/redis:v1 - ports: - - name: api - containerPort: 26379 - env: - - key: SENTINEL - value: "true" - labels: - name: redis-sentinel - role: sentinel - redis-sentinel: "true" -``` +We'll do the same thing for the sentinel. Here is the controller config:[redis-sentinel-controller.yaml](redis-sentinel-controller.yaml) We create it as follows: ```sh diff --git a/examples/redis/redis-controller.yaml b/examples/redis/redis-controller.yaml index 7e7eda5c6287e..62f5a1392e800 100644 --- a/examples/redis/redis-controller.yaml +++ b/examples/redis/redis-controller.yaml @@ -15,8 +15,7 @@ desiredState: image: kubernetes/redis:v1 cpu: 1000 ports: - - name: api - containerPort: 6379 + - containerPort: 6379 volumeMounts: - name: data mountPath: /redis-master-data diff --git a/examples/redis/redis-master-service.yaml b/examples/redis/redis-master-service.yaml deleted file mode 100644 index 95035a82139f5..0000000000000 --- a/examples/redis/redis-master-service.yaml +++ /dev/null @@ -1,10 +0,0 @@ -id: redis-master -kind: Service -apiVersion: v1beta1 -port: 6379 -selector: - name: redis - role: master -labels: - name: master - role: service diff --git a/examples/redis/redis-master.yaml b/examples/redis/redis-master.yaml index 96f05f38c8c52..66bdfa5e608ab 100644 --- a/examples/redis/redis-master.yaml +++ b/examples/redis/redis-master.yaml @@ -10,8 +10,7 @@ desiredState: image: kubernetes/redis:v1 cpu: 1000 ports: - - name: api - containerPort: 6379 + - containerPort: 6379 volumeMounts: - name: data mountPath: /redis-master-data @@ -21,8 +20,7 @@ desiredState: - name: sentinel image: kubernetes/redis:v1 ports: - - name: api - containerPort: 26379 + - containerPort: 26379 env: - key: SENTINEL value: "true" diff --git a/examples/redis/redis-proxy.yaml b/examples/redis/redis-proxy.yaml index b141d5ba77631..5882c2367d0a5 100644 --- a/examples/redis/redis-proxy.yaml +++ b/examples/redis/redis-proxy.yaml @@ -16,4 +16,4 @@ labels: role: proxy - + diff --git a/examples/redis/redis-sentinel-controller.yaml b/examples/redis/redis-sentinel-controller.yaml index 7ff914bc80d38..57e34fef33b9c 100644 --- a/examples/redis/redis-sentinel-controller.yaml +++ b/examples/redis/redis-sentinel-controller.yaml @@ -14,8 +14,7 @@ desiredState: - name: sentinel image: kubernetes/redis:v1 ports: - - name: api - containerPort: 26379 + - containerPort: 26379 env: - key: SENTINEL value: "true" diff --git a/examples/redis/redis-sentinel.yaml b/examples/redis/redis-sentinel.yaml deleted file mode 100644 index 5f78006879640..0000000000000 --- a/examples/redis/redis-sentinel.yaml +++ /dev/null @@ -1,22 +0,0 @@ -id: redis-sentinel -kind: Pod -apiVersion: v1beta1 -desiredState: - manifest: - version: v1beta1 - id: redis-sentinel - containers: - - name: sentinel - image: kubernetes/redis:v1 - ports: - - name: api - containerPort: 26379 - env: - - key: SENTINEL - value: "true" -labels: - name: redis-sentinel - role: sentinel - redis-sentinel: "true" - - diff --git a/examples/redis/redis-service.yaml b/examples/redis/redis-service.yaml deleted file mode 100644 index 608024513edfe..0000000000000 --- a/examples/redis/redis-service.yaml +++ /dev/null @@ -1,9 +0,0 @@ -id: redis -kind: Service -apiVersion: v1beta1 -port: 6379 -selector: - name: 
redis
-labels:
-  name: read
-  role: service
diff --git a/examples/redis/redis-slave.yaml b/examples/redis/redis-slave.yaml
deleted file mode 100644
index 4b31cfdfaf5e0..0000000000000
--- a/examples/redis/redis-slave.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-id: redis-slave
-kind: Pod
-apiVersion: v1beta1
-desiredState:
-  manifest:
-    version: v1beta1
-    id: redis-slave
-    containers:
-      - name: slave
-        image: kubernetes/redis:v1
-        cpu: 1000
-        ports:
-          - name: api
-            containerPort: 6379
-        volumeMounts:
-          - name: data
-            mountPath: /redis-master-data
-    volumes:
-      - name: data
-        source:
-          emptyDir: {}
-labels:
-  name: redis
-
-
-
diff --git a/examples/redis/v1beta3/README.md b/examples/redis/v1beta3/README.md
new file mode 100644
index 0000000000000..73e2dc50b6f2d
--- /dev/null
+++ b/examples/redis/v1beta3/README.md
@@ -0,0 +1,115 @@
+## Reliable, Scalable Redis on Kubernetes
+
+The following document describes the deployment of a reliable, multi-node Redis installation on Kubernetes. It deploys a master with replicated slaves, as well as replicated redis sentinels which are used for health checking and failover.
+
+### Prerequisites
+This example assumes that you have a Kubernetes cluster installed and running, and that you have installed the ```kubectl``` command line tool somewhere in your path. Please see the [getting started guides](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/docs/getting-started-guides) for installation instructions for your platform.
+
+### A note for the impatient
+This is a somewhat long tutorial. If you want to jump straight to the "do it now" commands, please see the [tl; dr](#tl-dr) at the end.
+
+### Turning up an initial master/sentinel pod.
+The first thing we will create is a [_Pod_](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/pods.md). A Pod is one or more containers that _must_ be scheduled onto the same host. All containers in a pod share a network namespace, and may optionally share mounted volumes.
+
+We will use the shared network namespace to bootstrap our Redis cluster. In particular, the very first sentinel needs to know how to find the master (subsequent sentinels just ask the first sentinel). Because all containers in a Pod share a network namespace, the sentinel can simply look at ```$(hostname -i):6379```.
+
+Here is the config for the initial master and sentinel pod: [redis-master.yaml](redis-master.yaml)
+
+Create this master as follows:
+```sh
+kubectl create -f examples/redis/v1beta3/redis-master.yaml
+```
+
+### Turning up a sentinel service
+In Kubernetes a _Service_ describes a set of Pods that perform the same task. For example, the set of nodes in a Cassandra cluster, or even the single node we created above. An important use for a Service is to create a load balancer which distributes traffic across members of the set. But a _Service_ can also be used as a standing query which makes a dynamically changing set of Pods (or the single Pod we've already created) available via the Kubernetes API.
+
+In Redis, we will use a Kubernetes Service to provide a discoverable endpoint for the Redis sentinels in the cluster. From the sentinels, Redis clients can find the master, and then the slaves and other relevant info for the cluster. This enables new members to join the cluster when failures occur.
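For example, once the sentinel service is up, a client inside the cluster could ask a sentinel for the current master's address. This is only a sketch: the master name ```mymaster``` is an assumption about the sentinel configuration baked into the kubernetes/redis image, and the hostname assumes a DNS add-on that resolves service names (substitute the service's IP otherwise).

```sh
# Ask a sentinel (reached through the redis-sentinel service) which redis
# server is currently the master. "mymaster" is the assumed master name;
# replace the hostname with the service IP if service DNS is not available.
redis-cli -h redis-sentinel -p 26379 sentinel get-master-addr-by-name mymaster
```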
+
+Here is the definition of the sentinel service: [redis-sentinel-service.yaml](redis-sentinel-service.yaml)
+
+Create this service:
+```sh
+kubectl create -f examples/redis/v1beta3/redis-sentinel-service.yaml
+```
+
+### Turning up replicated redis servers
+So far, what we have done is pretty manual, and not very fault-tolerant. If the ```redis-master``` pod that we previously created is destroyed for some reason (e.g. a machine dying), our Redis service goes away with it.
+
+In Kubernetes a _Replication Controller_ is responsible for replicating sets of identical pods. Like a _Service_ it has a selector query which identifies the members of its set. Unlike a _Service_ it also has a desired number of replicas, and it will create or delete _Pods_ to ensure that the number of _Pods_ matches up with its desired state.
+
+Replication Controllers will "adopt" existing pods that match their selector query, so let's create a Replication Controller with a single replica to adopt our existing Redis server.
+[redis-controller.yaml](redis-controller.yaml)
+
+The bulk of this controller config is actually identical to the redis-master pod definition above. It forms the template or "cookie cutter" that defines what it means to be a member of this set.
+
+Create this controller:
+
+```sh
+kubectl create -f examples/redis/v1beta3/redis-controller.yaml
+```
+
+We'll do the same thing for the sentinel. Here is the controller config: [redis-sentinel-controller.yaml](redis-sentinel-controller.yaml)
+
+We create it as follows:
+```sh
+kubectl create -f examples/redis/v1beta3/redis-sentinel-controller.yaml
+```
+
+### Resize our replicated pods
+Creating these controllers didn't actually change anything at first: we only asked for one sentinel and one redis server, and both already existed. Now we will add more replicas:
+
+```sh
+kubectl resize rc redis --replicas=3
+```
+
+```sh
+kubectl resize rc redis-sentinel --replicas=3
+```
+
+This will create two additional replicas of the redis server and two additional replicas of the redis sentinel.
+
+Unlike our original redis-master pod, these pods exist independently, and they use the ```redis-sentinel``` service that we defined above to discover and join the cluster.
+
+### Delete our manual pod
+The final step in the cluster turn-up is to delete the original redis-master pod that we created manually. While it was useful for bootstrapping discovery in the cluster, we really don't want the lifespan of our sentinel to be tied to the lifespan of one of our redis servers, and now that we have a successful, replicated redis sentinel service up and running, the binding is unnecessary.
+
+Delete the master as follows:
+```sh
+kubectl delete pods redis-master
+```
+
+Now let's take a close look at what happens after this pod is deleted. There are three things that happen:
+
+ 1. The redis replication controller notices that its desired state is 3 replicas, but there are currently only 2 replicas, and so it creates a new redis server to bring the replica count back up to 3.
+ 2. The redis-sentinel replication controller likewise notices the missing sentinel, and also creates a new sentinel.
+ 3. The redis sentinels themselves realize that the master has disappeared from the cluster, and begin the election procedure for selecting a new master. They perform this election and selection, and choose one of the existing redis server replicas to be the new master.
+
+### Conclusion
+At this point we now have a reliable, scalable Redis installation.
By resizing the replication controller for redis servers, we can increase or decrease the number of read-slaves in our cluster. Likewise, if failures occur, the redis-sentinels will perform master election and select a new master. + +### tl; dr +For those of you who are impatient, here is the summary of commands we ran in this tutorial + +```sh +# Create a bootstrap master +kubectl create -f examples/redis/v1beta3/redis-master.yaml + +# Create a service to track the sentinels +kubectl create -f examples/redis/v1beta3/redis-sentinel-service.yaml + +# Create a replication controller for redis servers +kubectl create -f examples/redis/v1beta3/redis-controller.yaml + +# Create a replication controller for redis sentinels +kubectl create -f examples/redis/v1beta3/redis-sentinel-controller.yaml + +# Resize both replication controllers +kubectl resize rc redis --replicas=3 +kubectl resize rc redis-sentinel --replicas=3 + +# Delete the original master pod +kubectl delete pods redis-master +``` + + diff --git a/examples/redis/v1beta3/redis-controller.yaml b/examples/redis/v1beta3/redis-controller.yaml new file mode 100644 index 0000000000000..03f667a981443 --- /dev/null +++ b/examples/redis/v1beta3/redis-controller.yaml @@ -0,0 +1,28 @@ +apiVersion: v1beta3 +kind: ReplicationController +metadata: + name: redis +spec: + replicas: 1 + selector: + name: redis + template: + metadata: + labels: + name: redis + spec: + containers: + - name: redis + image: kubernetes/redis:v1 + ports: + - containerPort: 6379 + resources: + limits: + cpu: "1" + volumeMounts: + - mountPath: /redis-master-data + name: data + volumes: + - name: data + emptyDir: {} + diff --git a/examples/redis/v1beta3/redis-master.yaml b/examples/redis/v1beta3/redis-master.yaml new file mode 100644 index 0000000000000..02abada976d24 --- /dev/null +++ b/examples/redis/v1beta3/redis-master.yaml @@ -0,0 +1,33 @@ +apiVersion: v1beta3 +kind: Pod +metadata: + labels: + name: redis + redis-sentinel: "true" + role: master + name: redis-master +spec: + containers: + - name: master + image: kubernetes/redis:v1 + env: + - name: MASTER + value: "true" + ports: + - containerPort: 6379 + resources: + limits: + cpu: "1" + volumeMounts: + - mountPath: /redis-master-data + name: data + - name: sentinel + image: kubernetes/redis:v1 + env: + - name: SENTINEL + value: "true" + ports: + - containerPort: 26379 + volumes: + - name: data + emptyDir: {} diff --git a/examples/redis/v1beta3/redis-proxy.yaml b/examples/redis/v1beta3/redis-proxy.yaml new file mode 100644 index 0000000000000..2993a45bf10b6 --- /dev/null +++ b/examples/redis/v1beta3/redis-proxy.yaml @@ -0,0 +1,14 @@ +apiVersion: v1beta3 +kind: Pod +metadata: + labels: + name: redis-proxy + role: proxy + name: redis-proxy +spec: + containers: + - name: proxy + image: kubernetes/redis-proxy:v1 + ports: + - containerPort: 6379 + name: api diff --git a/examples/redis/v1beta3/redis-sentinel-controller.yaml b/examples/redis/v1beta3/redis-sentinel-controller.yaml new file mode 100644 index 0000000000000..d75887736fae5 --- /dev/null +++ b/examples/redis/v1beta3/redis-sentinel-controller.yaml @@ -0,0 +1,23 @@ +apiVersion: v1beta3 +kind: ReplicationController +metadata: + name: redis-sentinel +spec: + replicas: 1 + selector: + redis-sentinel: "true" + template: + metadata: + labels: + name: redis-sentinel + redis-sentinel: "true" + role: sentinel + spec: + containers: + - name: sentinel + image: kubernetes/redis:v1 + env: + - name: SENTINEL + value: "true" + ports: + - containerPort: 26379 diff --git 
a/examples/redis/v1beta3/redis-sentinel-service.yaml b/examples/redis/v1beta3/redis-sentinel-service.yaml new file mode 100644 index 0000000000000..0cdaf50fda67f --- /dev/null +++ b/examples/redis/v1beta3/redis-sentinel-service.yaml @@ -0,0 +1,12 @@ +apiVersion: v1beta3 +kind: Service +metadata: + labels: + name: sentinel + role: service + name: redis-sentinel +spec: + containerPort: 26379 + port: 26379 + selector: + redis-sentinel: "true" diff --git a/examples/walkthrough/v1beta3/README.md b/examples/walkthrough/v1beta3/README.md new file mode 100644 index 0000000000000..680fc6765419d --- /dev/null +++ b/examples/walkthrough/v1beta3/README.md @@ -0,0 +1,68 @@ +# Kubernetes 101 - Walkthrough + +## Pods +The first atom of Kubernetes is a _pod_. A pod is a collection of containers that are symbiotically grouped. + +See [pods](../../../docs/pods.md) for more details. + +### Intro + +Trivially, a single container might be a pod. For example, you can express a simple web server as a pod:[pod1.yaml](pod1.yaml) + +A pod definition is a declaration of a _desired state_. Desired state is a very important concept in the Kubernetes model. Many things present a desired state to the system, and it is Kubernetes' responsibility to make sure that the current state matches the desired state. For example, when you create a Pod, you declare that you want the containers in it to be running. If the containers happen to not be running (e.g. program failure, ...), Kubernetes will continue to (re-)create them for you in order to drive them to the desired state. This process continues until you delete the Pod. + +See the [design document](../../../../DESIGN.md) for more details. + +### Volumes + +Now that's great for a static web server, but what about persistent storage? We know that the container file system only lives as long as the container does, so we need more persistent storage. To do this, you also declare a ```volume``` as part of your pod, and mount it into a container: +[pod2.yaml](pod2.yaml) + +In Kubernetes, ```emptyDir``` Volumes live for the lifespan of the Pod, which is longer than the lifespan of any one container, so if the container fails and is restarted, our persistent storage will live on. + +If you want to mount a directory that already exists in the file system (e.g. ```/var/logs```) you can use the ```hostDir``` directive. + +See [volumes](../../../docs/volumes.md) for more details. + +### Multiple Containers + +_Note: +The examples below are syntactically correct, but some of the images (e.g. kubernetes/git-monitor) don't exist yet. We're working on turning these into working examples._ + + +However, often you want to have two different containers that work together. An example of this would be a web server, and a helper job that polls a git repository for new updates: + +```yaml +apiVersion: v1beta3 +kind: Pod +metadata: + name: www +spec: + containers: + - name: nginx + image: dockerfile/nginx + volumeMounts: + - mountPath: /srv/www + name: www-data + readOnly: true + - name: git-monitor + image: kubernetes/git-monitor + env: + - name: GIT_REPO + value: http://github.com/some/repo.git + volumeMounts: + - mountPath: /data + name: www-data + volumes: + - name: www-data + emptyDir: {} +``` + +Note that we have also added a volume here. In this case, the volume is mounted into both containers. It is marked ```readOnly``` in the web server's case, since it doesn't need to write to the directory. 
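If you save the definition above to a file, it can be created like the earlier single-container pods. The filename below is hypothetical, since this multi-container example is not shipped as a separate file in this directory, and it assumes the referenced images exist (see the note above).

```sh
# Hypothetical: assumes the pod definition above has been saved as www-pod.yaml
# and that the images it references are available.
kubectl create -f www-pod.yaml

# Check that the pod (with both of its containers) comes up.
kubectl get pods
```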
+
+Finally, we have also introduced an environment variable to the ```git-monitor``` container, which allows us to parameterize that container with the particular git repository that we want to track.
+
+### What's next?
+Continue on to [Kubernetes 201](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/examples/walkthrough/k8s201.md), or for a complete application see the [guestbook example](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/examples/guestbook/README.md).
diff --git a/examples/walkthrough/v1beta3/k8s201.md b/examples/walkthrough/v1beta3/k8s201.md
new file mode 100644
index 0000000000000..11e643761e976
--- /dev/null
+++ b/examples/walkthrough/v1beta3/k8s201.md
@@ -0,0 +1,86 @@
+# Kubernetes 201 - Labels, Replication Controllers, Services and Health Checking
+
+### Overview
+Where we left off in the [previous episode](README.md), we had learned about pods, multiple containers and volumes. We'll now cover some slightly more advanced topics in Kubernetes, related to application productionization, deployment and scaling.
+
+### Labels
+Having already learned about Pods and how to create them, you may be struck by an urge to create many, many pods. Please do! But eventually you will need a system to organize these pods into groups. The system for achieving this in Kubernetes is Labels. Labels are key-value pairs that are attached to each API object in Kubernetes. Label selectors can be passed along with a RESTful ```list``` request to the apiserver to retrieve a list of objects which match that label selector. For example:
+
+```sh
+cluster/kubectl.sh get pods -l name=nginx
+```
+
+This lists all pods whose name label matches 'nginx'. Labels are discussed in detail [elsewhere](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/labels.md), but they are a core concept for two additional building blocks for Kubernetes: Replication Controllers and Services.
+
+### Replication Controllers
+
+OK, now that you have an awesome, multi-container, labelled pod and you want to use it to build an application, you might be tempted to just start building a whole bunch of individual pods. But if you do that, a whole host of operational concerns pops up. For example: how will you scale the number of pods up or down, and how will you ensure that all pods are homogeneous?
+
+Replication controllers are the objects that answer these questions. A replication controller combines a template for pod creation (a "cookie-cutter" if you will) and a number of desired replicas into a single API object. The replication controller also contains a label selector that identifies the set of objects it manages. It constantly measures the size of this set relative to the desired size, and takes action by creating or deleting pods. The design of replication controllers is discussed in detail [elsewhere](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/replication-controller.md).
+
+An example replication controller that instantiates two pods running nginx looks like: [replication-controller](replication-controller.yaml)
+
+### Services
+Once you have a replicated set of pods, you need an abstraction that enables connectivity between the layers of your application. For example, if you have a replication controller managing your backend jobs, you don't want to have to reconfigure your front-ends whenever you re-scale your backends. Likewise, if the pods in your backends are scheduled (or rescheduled) onto different machines, you can't be required to re-configure your front-ends. In Kubernetes, the Service API object achieves these goals. A Service basically combines an IP address and a label selector together to form a simple, static rallying point for connecting to a micro-service in your application.
+
+For example, here is a service that balances across the pods created in the previous nginx replication controller example: [service](service.yaml)
+
+When created, each service is assigned a unique IP address. This address is tied to the lifespan of the Service, and will not change while the Service is alive. Pods can be configured to talk to the service, and know that communication to the service will be automatically load-balanced out to some pod that is a member of the set identified by the label selector in the Service. Services are described in detail [elsewhere](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/services.md).
+
+### Health Checking
+When I write code it never crashes, right? Sadly the [kubernetes issues list](https://github.com/GoogleCloudPlatform/kubernetes/issues) indicates otherwise...
+
+Rather than trying to write bug-free code, a better approach is to use a management system to perform periodic health checking and repair of your application. That way a system outside of your application itself is responsible for monitoring the application and taking action to fix it. It's important that the system be outside of the application: if your application fails and the health checking agent is part of your application, it may fail as well, and you'll never know. In Kubernetes, the health check monitor is the Kubelet agent.
+
+#### Low level process health-checking
+
+The simplest form of health-checking is just process level health checking. The Kubelet constantly asks the Docker daemon if the container process is still running, and if not, the container process is restarted. In all of the Kubernetes examples you have run so far, this health checking was actually already enabled. It's on for every single container that runs in Kubernetes.
+
+#### Application health-checking
+
+However, in many cases this low-level health checking is insufficient. Consider, for example, the following code:
+
+```go
+lockOne := sync.Mutex{}
+lockTwo := sync.Mutex{}
+
+go func() {
+  lockOne.Lock()
+  lockTwo.Lock()
+  // ... do some work ...
+}()
+
+lockTwo.Lock()
+lockOne.Lock()
+```
+
+This is a classic example of a problem in computer science known as "Deadlock". From Docker's perspective your application is still operating and the process is still running, but from your application's perspective your code is locked up and will never respond correctly.
+
+To address this problem, Kubernetes supports user-implemented application health-checks. These checks are performed by the Kubelet to ensure that your application is operating correctly, for a definition of "correctly" that _you_ provide.
+
+Currently, there are three types of application health checks that you can choose from:
+
+ * HTTP Health Checks - The Kubelet will call a web hook. If it returns a status code between 200 and 399 it is considered a success; any other code is considered a failure.
+ * Container Exec - The Kubelet will execute a command inside your container. If the command exits with status 0 it is considered a success; any other exit code is considered a failure.
+ * TCP Socket - The Kubelet will attempt to open a socket to your container.
If it can establish a connection, the container is considered healthy, if it can't it is considered a failure. + +In all cases, if the Kubelet discovers a failure, the container is restarted. + +The container health checks are configured in the "LivenessProbe" section of your container config. There you can also specify an "initialDelaySeconds" that is a grace period from when the container is started to when health checks are performed, to enable your container to perform any necessary initialization. + +Here is an example config for a pod with an HTTP health check: [pod-with-http-healthcheck](pod-with-http-healthcheck.yaml) + +### What's next? +For a complete application see the [guestbook example](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/examples/guestbook). diff --git a/examples/walkthrough/v1beta3/pod-with-http-healthcheck.yaml b/examples/walkthrough/v1beta3/pod-with-http-healthcheck.yaml new file mode 100644 index 0000000000000..0b6ee1832212f --- /dev/null +++ b/examples/walkthrough/v1beta3/pod-with-http-healthcheck.yaml @@ -0,0 +1,21 @@ +apiVersion: v1beta3 +kind: Pod +metadata: + name: pod-with-healthcheck +spec: + containers: + - name: nginx + image: dockerfile/nginx + # defines the health checking + livenessProbe: + # an http probe + httpGet: + path: /_status/healthz + port: 8080 + # length of time to wait for a pod to initialize + # after pod startup, before applying health checking + initialDelaySeconds: 30 + timeoutSeconds: 1 + ports: + - containerPort: 80 + diff --git a/examples/walkthrough/v1beta3/pod1.yaml b/examples/walkthrough/v1beta3/pod1.yaml new file mode 100644 index 0000000000000..56ae58448abd3 --- /dev/null +++ b/examples/walkthrough/v1beta3/pod1.yaml @@ -0,0 +1,8 @@ +apiVersion: v1beta3 +kind: Pod +metadata: + name: www +spec: + containers: + - name: nginx + image: dockerfile/nginx diff --git a/examples/walkthrough/v1beta3/pod2.yaml b/examples/walkthrough/v1beta3/pod2.yaml new file mode 100644 index 0000000000000..c37a00324dc42 --- /dev/null +++ b/examples/walkthrough/v1beta3/pod2.yaml @@ -0,0 +1,17 @@ +apiVersion: v1beta3 +kind: Pod +metadata: + name: storage +spec: + containers: + - name: redis + image: dockerfile/redis + volumeMounts: + # name must match the volume name below + - name: redis-persistent-storage + # mount path within the container + mountPath: /data/redis + volumes: + - name: redis-persistent-storage + emptyDir: {} + diff --git a/examples/walkthrough/v1beta3/replication-controller.yaml b/examples/walkthrough/v1beta3/replication-controller.yaml new file mode 100644 index 0000000000000..568f2d902708b --- /dev/null +++ b/examples/walkthrough/v1beta3/replication-controller.yaml @@ -0,0 +1,24 @@ +apiVersion: v1beta3 +kind: ReplicationController +metadata: + name: nginx-controller +spec: + replicas: 2 + # selector identifies the set of Pods that this + # replicaController is responsible for managing + selector: + name: nginx + # podTemplate defines the 'cookie cutter' used for creating + # new pods when necessary + template: + metadata: + labels: + # Important: these labels need to match the selector above + # The api server enforces this constraint. 
+ name: nginx + spec: + containers: + - name: nginx + image: dockerfile/nginx + ports: + - containerPort: 80 diff --git a/examples/walkthrough/v1beta3/service.yaml b/examples/walkthrough/v1beta3/service.yaml new file mode 100644 index 0000000000000..3cd285fd5757a --- /dev/null +++ b/examples/walkthrough/v1beta3/service.yaml @@ -0,0 +1,18 @@ +apiVersion: v1beta3 +kind: Service +metadata: + name: nginx-example +spec: + # the container on each pod to connect to, can be a name + # (e.g. 'www') or a number (e.g. 80) + containerPort: 80 + # the port that this service should serve on + port: 8000 + protocol: TCP + # just like the selector in the replication controller, + # but this time it identifies the set of pods to load balance + # traffic to. + selector: + name: nginx + +
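As a rough usage sketch (not shipped as part of these files): once the replication controller and service above are created, the service's assigned IP can be looked up with ```kubectl get services```, and any pod in the cluster should be able to reach the nginx replicas through it on port 8000. The placeholder IP below is an assumption; substitute the address that kubectl reports.

```sh
# Create the replicated nginx pods and the service that fronts them.
kubectl create -f examples/walkthrough/v1beta3/replication-controller.yaml
kubectl create -f examples/walkthrough/v1beta3/service.yaml

# Look up the IP address assigned to the service.
kubectl get services nginx-example

# From a machine inside the cluster, traffic to <service-ip>:8000 is
# load-balanced across the pods selected by "name: nginx".
curl http://<service-ip>:8000/
```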