Skip to content

Commit

Permalink
Launch Elasticsearch and Kibana automatically
Browse files Browse the repository at this point in the history
  • Loading branch information
satnam6502 committed Jan 9, 2015
1 parent 21b661e commit 295bd37
Show file tree
Hide file tree
Showing 46 changed files with 404 additions and 18,678 deletions.
32 changes: 32 additions & 0 deletions cluster/addons/fluentd-elasticsearch/es-controller.yaml.in
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
# ReplicationController template (.yaml.in) for the Elasticsearch logging
# backend. {ELASTICSEARCH_LOGGING_REPLICAS} is substituted at cluster
# bring-up time. (Indentation restored: the rendered diff had flattened
# the YAML nesting, which made the manifest invalid.)
apiVersion: v1beta1
kind: ReplicationController
id: elasticsearch-logging-controller
desiredState:
  replicas: {ELASTICSEARCH_LOGGING_REPLICAS}
  replicaSelector:
    name: elasticsearch-logging
  podTemplate:
    desiredState:
      manifest:
        version: v1beta1
        id: es-log-ingestion
        containers:
          - name: elasticsearch-logging
            image: dockerfile/elasticsearch
            ports:
              - name: es-port
                containerPort: 9200   # Elasticsearch REST API
              - name: es-transport-port
                containerPort: 9300   # inter-node transport protocol
            volumeMounts:
              - name: es-persistent-storage
                mountPath: /data
        volumes:
          - name: es-persistent-storage
            source:
              # NOTE(review): emptyDir lives only as long as the pod —
              # indexed log data is lost if the pod is rescheduled.
              emptyDir: {}
    labels:
      name: elasticsearch-logging
labels:
  name: elasticsearch-logging

Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
apiVersion: v1beta1
kind: Service
id: elasticsearch
id: elasticsearch-logging
containerPort: es-port
port: 9200
selector:
app: elasticsearch
name: elasticsearch-logging
createExternalLoadBalancer: true
File renamed without changes.
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
.PHONY: build push

TAG = latest
TAG = 1.0

build:
sudo docker build -t kubernetes/fluentd-elasticsearch:$(TAG) .
Expand Down
File renamed without changes.
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@
type elasticsearch
log_level info
include_tag_key true
host elasticsearch.default
host elasticsearch-logging.default
port 9200
logstash_format true
flush_interval 5s
Expand All @@ -69,7 +69,7 @@
type elasticsearch
log_level info
include_tag_key true
host elasticsearch.default
host elasticsearch-logging.default
port 9200
logstash_format true
flush_interval 5s
Expand Down
22 changes: 22 additions & 0 deletions cluster/addons/fluentd-elasticsearch/kibana-controller.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
# ReplicationController for a single Kibana log-viewer pod, fronted by the
# 'kibana-logging' service. (Indentation restored: the rendered diff had
# flattened the YAML nesting, which made the manifest invalid.)
apiVersion: v1beta1
kind: ReplicationController
id: kibana-logging-controller
desiredState:
  replicas: 1
  replicaSelector:
    name: kibana-logging
  podTemplate:
    desiredState:
      manifest:
        version: v1beta1
        id: kibana-viewer
        containers:
          - name: kibana-logging
            image: kubernetes/kibana:1.0
            ports:
              - name: kibana-port
                containerPort: 80   # nginx proxy in front of Kibana UI
    labels:
      name: kibana-logging
labels:
  name: kibana-logging
File renamed without changes.
9 changes: 9 additions & 0 deletions cluster/addons/fluentd-elasticsearch/kibana-image/Makefile
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
# Build and publish the kubernetes/kibana image referenced by
# kibana-controller.yaml. Bump TAG whenever the image contents change so
# deployed controllers pick up a distinct, immutable tag.
# (Recipe lines below use hard TABs, as GNU Make requires; the rendered
# diff had stripped them.)
.PHONY: build push

TAG = 1.0

build:
	docker build -t kubernetes/kibana:$(TAG) .

push:
	docker push kubernetes/kibana:$(TAG)
Original file line number Diff line number Diff line change
Expand Up @@ -26,16 +26,16 @@
# PROXY_PORT is set to 9200 because Elasticsearch is running on the
# same node as Kibana. If KIBANA_IP is the external IP address of
# the Kubernetes Kibana service then all requests to:
# KIBANA_SERVICE:$ES_PORT/elasticsearch/XXX
# KIBANA_LOGGING_SERVICE:$ES_PORT/elasticsearch/XXX
# are proxied to:
# http://127.0.0.1:9200/XXX
# 2. Elasticsearch and Kibana are run in separate pods and Elasticsearch
# has an IP and port exposed via a Kubernetes service. In this case
# the Elasticsearch service *must* be called 'elasticsearch-logging' and then
# all requests sent to:
# KIBANA_SERVICE:$ES_PORT/elasticsearch/XXX
# KIBANA_LOGGING_SERVICE:$ES_PORT/elasticsearch/XXX
# are proxied to:
# http://$ELASTICSEARCH_SERVICE_HOST:$ELASTICSEARCH_SERVICE_PORT:9200/XXX
# http://$ELASTICSEARCH_LOGGING_SERVICE_HOST:$ELASTICSEARCH_LOGGING_SERVICE_PORT:9200/XXX
# The proxy configuration occurs in a location block of the nginx configuration
# file /etc/nginx/sites-available/default.

Expand All @@ -57,9 +57,9 @@ echo ES_PORT=$ES_PORT
# code in the configuration for nginx. If a Kubernetes Elasticsearch
# service called 'elasticsearch-logging' is defined, use that. Otherwise, use
# a local instance of Elasticsearch on port 9200.
PROXY_HOST=${ELASTICSEARCH_SERVICE_HOST:-127.0.0.1}
PROXY_HOST=${ELASTICSEARCH_LOGGING_SERVICE_HOST:-127.0.0.1}
echo PROXY_HOST=${PROXY_HOST}
PROXY_PORT=${ELASTICSEARCH_SERVICE_PORT:-9200}
PROXY_PORT=${ELASTICSEARCH_SERVICE_LOGGING_PORT:-9200}
echo PROXY_PORT=${PROXY_PORT}
# Test the connection to Elasticsearch
echo "Running curl http://${PROXY_HOST}:${PROXY_PORT}"
Expand Down
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
apiVersion: v1beta1
kind: Service
id: kibana
id: kibana-logging
containerPort: kibana-port
port: 5601
selector:
app: kibana-viewer
name: kibana-logging
createExternalLoadBalancer: true


34 changes: 34 additions & 0 deletions cluster/addons/fluentd-elasticsearch/logging-demo/Makefile
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
# Makefile for launching synthetic logging sources (any platform)
# and for reporting the forwarding rules for the
# Elasticsearch and Kibana pods for the GCE platform.


# Fixed: the original .PHONY line fused two names into "logger10-downget",
# leaving both 'logger10-down' and 'get' undeclared.
.PHONY: up down logger-up logger-down logger10-up logger10-down get net

KUBECTL=../../../kubectl.sh

# Bring up / tear down both synthetic loggers together.
up: logger-up logger10-up

down: logger-down logger10-down


# The leading '-' makes these best-effort: creation/deletion failures
# (e.g. pod already exists or is already gone) do not abort make.
logger-up:
	-${KUBECTL} create -f synthetic_0_25lps.yaml

logger-down:
	-${KUBECTL} delete pods synthetic-logger-0.25lps-pod

logger10-up:
	-${KUBECTL} create -f synthetic_10lps.yaml

logger10-down:
	-${KUBECTL} delete pods synthetic-logger-10lps-pod

# Report running pods, replication controllers and services.
get:
	${KUBECTL} get pods
	${KUBECTL} get replicationControllers
	${KUBECTL} get services

# GCE only: show the external forwarding rules for the two services.
net:
	gcloud compute forwarding-rules describe elasticsearch-logging
	gcloud compute forwarding-rules describe kibana-logging
111 changes: 111 additions & 0 deletions cluster/addons/fluentd-elasticsearch/logging-demo/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,111 @@
# Elasticsearch/Kibana Logging Demonstration
This directory contains two pod specifications which can be used as synthetic
logging sources. The pod specification in [synthetic_0_25lps.yaml](synthetic_0_25lps.yaml)
describes a pod that just emits a log message once every 4 seconds:
```
# This pod specification creates an instance of a synthetic logger. The logger
# is simply a program that writes out the hostname of the pod, a count which increments
# by one on each iteration (to help notice missing log entries) and the date using
# a long format (RFC-3339) to nano-second precision. This program logs at a frequency
# of 0.25 lines per second. The shellscript program is given directly to bash as -c argument
# and could have been written out as:
# i="0"
# while true
# do
# echo -n "`hostname`: $i: "
# date --rfc-3339 ns
# sleep 4
# i=$[$i+1]
# done
apiVersion: v1beta1
kind: Pod
id: synthetic-logger-0.25lps-pod
desiredState:
manifest:
version: v1beta1
id: synth-logger-0.25lps
containers:
- name: synth-lgr
image: ubuntu:14.04
command: ["bash", "-c", "i=\"0\"; while true; do echo -n \"`hostname`: $i: \"; date --rfc-3339 ns; sleep 4; i=$[$i+1]; done"]
labels:
name: synth-logging-source
```

The other YAML file [synthetic_10lps.yaml](synthetic_10lps.yaml) specifies a similar synthetic logger that emits 10 log messages every second. To run both synthetic loggers:
```
$ make up
../../../kubectl.sh create -f synthetic_0_25lps.yaml
Running: ../../../cluster/../cluster/gce/../../_output/dockerized/bin/linux/amd64/kubectl create -f synthetic_0_25lps.yaml
synthetic-logger-0.25lps-pod
../../../kubectl.sh create -f synthetic_10lps.yaml
Running: ../../../cluster/../cluster/gce/../../_output/dockerized/bin/linux/amd64/kubectl create -f synthetic_10lps.yaml
synthetic-logger-10lps-pod
```

Visiting the Kibana dashboard should make it clear that logs are being collected from the two synthetic loggers:
![Synthetic loggers](synth-logger.png)

You can report the running pods, replication controllers and services with another Makefile rule:
```
$ make get
../../../kubectl.sh get pods
Running: ../../../../cluster/gce/../../_output/dockerized/bin/linux/amd64/kubectl get pods
POD CONTAINER(S) IMAGE(S) HOST LABELS STATUS
7e1c7ce6-9764-11e4-898c-42010af03582 kibana-logging kubernetes/kibana kubernetes-minion-3.c.kubernetes-elk.internal/130.211.129.169 name=kibana-logging Running
synthetic-logger-0.25lps-pod synth-lgr ubuntu:14.04 kubernetes-minion-2.c.kubernetes-elk.internal/146.148.41.87 name=synth-logging-source Running
synthetic-logger-10lps-pod synth-lgr ubuntu:14.04 kubernetes-minion-1.c.kubernetes-elk.internal/146.148.42.44 name=synth-logging-source Running
influx-grafana influxdb kubernetes/heapster_influxdb kubernetes-minion-3.c.kubernetes-elk.internal/130.211.129.169 name=influxdb Running
grafana kubernetes/heapster_grafana
elasticsearch dockerfile/elasticsearch
heapster heapster kubernetes/heapster kubernetes-minion-2.c.kubernetes-elk.internal/146.148.41.87 name=heapster Running
67cfcb1f-9764-11e4-898c-42010af03582 etcd quay.io/coreos/etcd:latest kubernetes-minion-3.c.kubernetes-elk.internal/130.211.129.169 k8s-app=skydns Running
kube2sky kubernetes/kube2sky:1.0
skydns kubernetes/skydns:2014-12-23-001
6ba20338-9764-11e4-898c-42010af03582 elasticsearch-logging dockerfile/elasticsearch kubernetes-minion-3.c.kubernetes-elk.internal/130.211.129.169 name=elasticsearch-logging Running
../../../cluster/kubectl.sh get replicationControllers
Running: ../../../cluster/../cluster/gce/../../_output/dockerized/bin/linux/amd64/kubectl get replicationControllers
CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS
skydns etcd quay.io/coreos/etcd:latest k8s-app=skydns 1
kube2sky kubernetes/kube2sky:1.0
skydns kubernetes/skydns:2014-12-23-001
elasticsearch-logging-controller elasticsearch-logging dockerfile/elasticsearch name=elasticsearch-logging 1
kibana-logging-controller kibana-logging kubernetes/kibana name=kibana-logging 1
../../.../kubectl.sh get services
Running: ../../../cluster/../cluster/gce/../../_output/dockerized/bin/linux/amd64/kubectl get services
NAME LABELS SELECTOR IP PORT
kubernetes-ro component=apiserver,provider=kubernetes <none> 10.0.83.3 80
kubernetes component=apiserver,provider=kubernetes <none> 10.0.79.4 443
influx-master <none> name=influxdb 10.0.232.223 8085
skydns k8s-app=skydns k8s-app=skydns 10.0.0.10 53
elasticsearch-logging <none> name=elasticsearch-logging 10.0.25.103 9200
kibana-logging <none> name=kibana-logging 10.0.208.114 5601
```
On the GCE provider you can also obtain the external IP addresses of the Elasticsearch and Kibana services:
```
$ make net
IPAddress: 130.211.120.118
IPProtocol: TCP
creationTimestamp: '2015-01-08T10:30:34.210-08:00'
id: '12815488049392139704'
kind: compute#forwardingRule
name: elasticsearch-logging
portRange: 9200-9200
region: https://www.googleapis.com/compute/v1/projects/kubernetes-elk/regions/us-central1
selfLink: https://www.googleapis.com/compute/v1/projects/kubernetes-elk/regions/us-central1/forwardingRules/elasticsearch-logging
target: https://www.googleapis.com/compute/v1/projects/kubernetes-elk/regions/us-central1/targetPools/elasticsearch-logging
gcloud compute forwarding-rules describe kibana-logging
IPAddress: 146.148.40.158
IPProtocol: TCP
creationTimestamp: '2015-01-08T10:31:05.715-08:00'
id: '2755171906970792849'
kind: compute#forwardingRule
name: kibana-logging
portRange: 5601-5601
region: https://www.googleapis.com/compute/v1/projects/kubernetes-elk/regions/us-central1
selfLink: https://www.googleapis.com/compute/v1/projects/kubernetes-elk/regions/us-central1/forwardingRules/kibana-logging
target: https://www.googleapis.com/compute/v1/projects/kubernetes-elk/regions/us-central1/targetPools/kibana-logging
```
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
10 changes: 6 additions & 4 deletions cluster/aws/config-default.sh
Original file line number Diff line number Diff line change
Expand Up @@ -36,12 +36,14 @@ MINION_SCOPES=""
POLL_SLEEP_INTERVAL=3
PORTAL_NET="10.0.0.0/16"

# Optional: Install node monitoring.
ENABLE_NODE_MONITORING=true

# Optional: Install node logging
ENABLE_NODE_LOGGING=true
ENABLE_NODE_LOGGING=false
LOGGING_DESTINATION=elasticsearch # options: elasticsearch, gcp

# Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up.
ENABLE_CLUSTER_LOGGING=false
ELASTICSEARCH_LOGGING_REPLICAS=1

IAM_PROFILE="kubernetes"
LOG="/dev/null"

Expand Down
8 changes: 8 additions & 0 deletions cluster/aws/util.sh
Original file line number Diff line number Diff line change
Expand Up @@ -497,3 +497,11 @@ function kube-down {
$AWS_CMD delete-route --route-table-id $route_table_id --destination-cidr-block 0.0.0.0/0 > $LOG
$AWS_CMD delete-vpc --vpc-id $vpc_id > $LOG
}

# Stub: cluster-level logging (Elasticsearch/Kibana) bring-up is not
# implemented for the AWS provider yet — presumably invoked during
# cluster bring-up when cluster logging is enabled; confirm against
# the kube-up flow.
function setup-logging {
echo "TODO: setup logging"
}

# Stub counterpart for cluster teardown.
function teardown-logging {
echo "TODO: teardown logging"
}
8 changes: 8 additions & 0 deletions cluster/azure/config-default.sh
Original file line number Diff line number Diff line change
Expand Up @@ -36,3 +36,11 @@ MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24"))
MINION_SCOPES=""

PORTAL_NET="10.250.0.0/16"

# Optional: Install node logging
ENABLE_NODE_LOGGING=false
LOGGING_DESTINATION=elasticsearch # options: elasticsearch, gcp

# Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up.
ENABLE_CLUSTER_LOGGING=false
ELASTICSEARCH_LOGGING_REPLICAS=1
8 changes: 8 additions & 0 deletions cluster/azure/util.sh
Original file line number Diff line number Diff line change
Expand Up @@ -565,3 +565,11 @@ function setup-monitoring {
function teardown-monitoring {
echo "not implemented" >/dev/null
}

# Stub: cluster-level logging (Elasticsearch/Kibana) bring-up is not
# implemented for the Azure provider yet — presumably invoked during
# cluster bring-up when cluster logging is enabled; confirm against
# the kube-up flow.
function setup-logging {
echo "TODO: setup logging"
}

# Stub counterpart for cluster teardown.
function teardown-logging {
echo "TODO: teardown logging"
}
4 changes: 4 additions & 0 deletions cluster/gce/config-default.sh
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,10 @@ ENABLE_DOCKER_REGISTRY_CACHE=true
ENABLE_NODE_LOGGING=true
LOGGING_DESTINATION=elasticsearch # options: elasticsearch, gcp

# Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up.
ENABLE_CLUSTER_LOGGING=true
ELASTICSEARCH_LOGGING_REPLICAS=1

# Don't require https for registries in our local RFC1918 network
EXTRA_DOCKER_OPTS="--insecure-registry 10.0.0.0/8"

Expand Down
Loading

0 comments on commit 295bd37

Please sign in to comment.