diff --git a/README.md b/README.md index 98fb2be6762f3..97e080c8f1806 100644 --- a/README.md +++ b/README.md @@ -23,11 +23,12 @@ While the concepts and architecture in Kubernetes represent years of experience * [CoreOS](docs/getting-started-guides/coreos.md) * [OpenStack](https://developer.rackspace.com/blog/running-coreos-and-kubernetes/) * [CloudStack](docs/getting-started-guides/cloudstack.md) + * [Rackspace](docs/getting-started-guides/rackspace.md) + * The following clouds are currently broken at Kubernetes head. Please sync your client to `v0.3` (`git checkout v0.3`) to use these: * [Locally](docs/getting-started-guides/locally.md) * [vSphere](docs/getting-started-guides/vsphere.md) * [Microsoft Azure](docs/getting-started-guides/azure.md) - * [Rackspace](docs/getting-started-guides/rackspace.md) * [Kubernetes 101](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/examples/walkthrough) * [kubecfg command line tool](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/cli.md) * [Kubernetes API Documentation](http://cdn.rawgit.com/GoogleCloudPlatform/kubernetes/31a0daae3627c91bc96e1f02a6344cd76e294791/api/kubernetes.html) diff --git a/cluster/rackspace/cloud-config/master-cloud-config.yaml b/cluster/rackspace/cloud-config/master-cloud-config.yaml new file mode 100644 index 0000000000000..477db802f52e3 --- /dev/null +++ b/cluster/rackspace/cloud-config/master-cloud-config.yaml @@ -0,0 +1,152 @@ +#cloud-config + +write_files: + - path: /opt/bin/regen-minion-list.sh + permissions: 0755 + content: | + #!/bin/sh + m=$(echo $(etcdctl ls --recursive /corekube/minions | cut -d/ -f4 | sort) | tr ' ' ,) + echo "Found $m" + mkdir -p /run/apiserver + echo "MINIONS=$m" > /run/apiserver/minions.env + - path: /opt/bin/git-kubernetes-nginx.sh + permissions: 0755 + content: | + #!/bin/bash + git clone https://github.com/doublerr/kubernetes_nginx /opt/kubernetes_nginx + /usr/bin/cp /opt/.kubernetes_auth /opt/kubernetes_nginx/.kubernetes_auth + docker 
build -t kubernetes_nginx:latest /opt/kubernetes_nginx + - path: /opt/bin/download-release.sh + permissions: 0755 + content: | + #!/bin/bash + OBJECT_URL="CLOUD_FILES_URL" + echo "Downloading release ($OBJECT_URL)" + wget "${OBJECT_URL}" -O /opt/kubernetes.tar.gz + echo "Unpacking release" + rm -rf /opt/kubernetes || false + tar xzf /opt/kubernetes.tar.gz -C /opt/ + - path: /opt/.kubernetes_auth + permissions: 0600 + content: | + KUBE_USER:KUBE_PASSWORD + +coreos: + etcd: + name: kubernetes-master + discovery: https://discovery.etcd.io/DISCOVERY_ID + addr: $private_ipv4:4001 + peer-addr: $private_ipv4:7001 + peer-bind-addr: $private_ipv4:7001 + + fleet: + public-ip: $private_ipv4 + metadata: kubernetes_role=master + + update: + reboot-strategy: etcd-lock + + units: + - name: etcd.service + command: start + - name: fleet.service + command: start + - name: download-release.service + command: start + content: | + [Unit] + Description=Downloads Kubernetes Release + After=network-online.target + Requires=network-online.target + [Service] + Type=oneshot + RemainAfterExit=yes + ExecStart=/usr/bin/bash /opt/bin/download-release.sh + - name: master-apiserver.service + command: start + content: | + [Unit] + Description=Kubernetes API Server + Documentation=https://github.com/GoogleCloudPlatform/kubernetes + After=network-online.target + Requires=network-online.target + After=minion-finder.service + Requires=minion-finder.service + After=download-release.service + Requires=download-release.service + [Service] + EnvironmentFile=-/run/apiserver/minions.env + ExecStartPre=/usr/bin/ln -sf /opt/kubernetes/server/bin/apiserver /opt/bin/apiserver + ExecStart=/opt/bin/apiserver --address=127.0.0.1 --port=8080 --machines=${MINIONS} --etcd_servers=http://127.0.0.1:4001 --portal_net=PORTAL_NET --logtostderr=true + Restart=always + RestartSec=2 + - name: master-apiserver-sighup.path + command: start + content: | + [Path] + PathChanged=/run/apiserver/minions.env + - name: 
master-apiserver-sighup.service + command: start + content: | + [Service] + ExecStart=/usr/bin/pkill -SIGHUP -f apiserver + - name: minion-finder.service + command: start + content: | + [Unit] + Description=Kubernetes Minion finder + After=network-online.target + Requires=network-online.target + After=etcd.service + Requires=etcd.service + [Service] + ExecStartPre=/opt/bin/regen-minion-list.sh + ExecStart=/usr/bin/etcdctl exec-watch --recursive /corekube/minions -- /opt/bin/regen-minion-list.sh + Restart=always + RestartSec=30 + - name: master-controller-manager.service + command: start + content: | + [Unit] + Description=Kubernetes Controller Manager + Documentation=https://github.com/GoogleCloudPlatform/kubernetes + After=network-online.target + Requires=network-online.target + After=master-apiserver.service + Requires=master-apiserver.service + [Service] + ExecStartPre=/usr/bin/ln -sf /opt/kubernetes/server/bin/controller-manager /opt/bin/controller-manager + ExecStart=/opt/bin/controller-manager --master=127.0.0.1:8080 --logtostderr=true + Restart=always + RestartSec=2 + - name: master-scheduler.service + command: start + content: | + [Unit] + Description=Kubernetes Scheduler + Documentation=https://github.com/GoogleCloudPlatform/kubernetes + After=network-online.target + Requires=network-online.target + After=master-apiserver.service + Requires=master-apiserver.service + [Service] + ExecStartPre=/usr/bin/ln -sf /opt/kubernetes/server/bin/scheduler /opt/bin/scheduler + ExecStart=/opt/bin/scheduler --master=127.0.0.1:8080 --logtostderr=true + Restart=always + RestartSec=10 + #Running nginx service with --net="host" is a necessary evil until running all k8s services in docker. 
+ - name: kubernetes-nginx.service + command: start + content: | + [Unit] + Description=Kubernetes Nginx Service + After=network-online.target + Requires=network-online.target + After=docker.service + Requires=docker.service + [Service] + ExecStartPre=/opt/bin/git-kubernetes-nginx.sh + ExecStart=/usr/bin/docker run --rm --net="host" -p "443:443" -t --name "kubernetes_nginx" kubernetes_nginx + ExecStop=/usr/bin/docker stop kubernetes_nginx + Restart=always + RestartSec=15 diff --git a/cluster/rackspace/cloud-config/minion-cloud-config.yaml b/cluster/rackspace/cloud-config/minion-cloud-config.yaml new file mode 100644 index 0000000000000..40c30dbbaa24f --- /dev/null +++ b/cluster/rackspace/cloud-config/minion-cloud-config.yaml @@ -0,0 +1,224 @@ +#cloud-config + +write_files: + - path: /opt/bin/kube-net-update.sh + permissions: 0755 + content: | + #!/bin/sh + set -x -e + nh=${ETCD_WATCH_KEY##*/} + net=$ETCD_WATCH_VALUE + case $ETCD_WATCH_ACTION in + set) ip route replace $net via $nh dev eth2 metric 900 ;; + expire) ip route del $net via $nh metric 900 ;; + esac + - path: /opt/bin/download-release.sh + permissions: 0755 + content: | + #!/bin/bash + OBJECT_URL="CLOUD_FILES_URL" + echo "Downloading release ($OBJECT_URL)" + wget "${OBJECT_URL}" -O /opt/kubernetes.tar.gz + echo "Unpacking release" + rm -rf /opt/kubernetes || false + tar xzf /opt/kubernetes.tar.gz -C /opt/ + - path: /opt/kubernetes-manifests/cadvisor.manifest + permissions: 0755 + content: | + version: v1beta2 + id: cadvisor-agent + containers: + - name: cadvisor + image: google/cadvisor:latest + ports: + - name: http + containerPort: 8080 + hostPort: 4194 + volumeMounts: + - name: varrun + mountPath: /var/run + readOnly: false + - name: varlibdocker + mountPath: /var/lib/docker + readOnly: true + - name: cgroups + mountPath: /sys/fs/cgroup + readOnly: true + volumes: + - name: varrun + source: + hostDir: + path: /var/run + - name: varlibdocker + source: + hostDir: + path: /var/lib/docker + - name: cgroups 
+ source: + hostDir: + path: /sys/fs/cgroup + +coreos: + etcd: + name: kubernetes-minion-INDEX + discovery: https://discovery.etcd.io/DISCOVERY_ID + addr: $private_ipv4:4001 + peer-addr: $private_ipv4:7001 + peer-bind-addr: $private_ipv4:7001 + + fleet: + public-ip: $private_ipv4 + metadata: kubernetes_role=minion + + update: + reboot-strategy: etcd-lock + + units: + - name: etcd.service + command: start + - name: fleet.service + command: start + - name: download-release.service + command: start + content: | + [Unit] + Description=Downloads Kubernetes Release + After=network-online.target + Requires=network-online.target + [Service] + Type=oneshot + RemainAfterExit=yes + ExecStart=/usr/bin/bash /opt/bin/download-release.sh + - name: minion-kubelet.service + command: start + content: | + [Unit] + Description=Kubernetes Kubelet + Documentation=https://github.com/GoogleCloudPlatform/kubernetes + After=network-online.target + Requires=network-online.target + After=docker.service + Requires=docker.service + After=download-release.service + Requires=download-release.service + [Service] + ExecStartPre=/usr/bin/ln -sf /opt/kubernetes/server/bin/kubelet /opt/bin/kubelet + ExecStart=/opt/bin/kubelet --address=$private_ipv4 --hostname_override=$private_ipv4 --etcd_servers=http://127.0.0.1:4001 --logtostderr=true --config=/opt/kubernetes-manifests + Restart=always + RestartSec=2 + - name: minion-proxy.service + command: start + content: | + [Unit] + Description=Kubernetes Proxy + Documentation=https://github.com/GoogleCloudPlatform/kubernetes + After=network-online.target + Requires=network-online.target + After=docker.service + Requires=docker.service + After=download-release.service + Requires=download-release.service + [Service] + ExecStartPre=/usr/bin/ln -sf /opt/kubernetes/server/bin/proxy /opt/bin/proxy + ExecStart=/opt/bin/proxy --bind_address=$private_ipv4 --etcd_servers=http://127.0.0.1:4001 --logtostderr=true + Restart=always + RestartSec=2 + - name: 
minion-advertiser.service + command: start + content: | + [Unit] + Description=Kubernetes Minion Advertiser + After=etcd.service + Requires=etcd.service + After=minion-kubelet.service + [Service] + ExecStart=/bin/sh -c 'while :; do etcdctl set /corekube/minions/$private_ipv4 $private_ipv4 --ttl 300; sleep 120; done' + Restart=always + RestartSec=120 + - name: net-advertiser.service + command: start + content: | + [Unit] + Description=Kubernetes Network Advertiser + After=etcd.service + Requires=etcd.service + After=minion-kubelet.service + [Service] + ExecStart=/bin/sh -c 'eth2_ip=$$(ip -o -f inet a show dev eth2 | sed "s/.* inet \([0-9.]\+\).*/\1/"); while :; do etcdctl set /corekube/net/$$eth2_ip 10.240.INDEX.0/24 --ttl 300; sleep 120; done' + Restart=always + RestartSec=120 + - name: net-router.service + command: start + content: | + [Unit] + Description=Kubernetes Network Router + After=etcd.service + Requires=etcd.service + After=minion-kubelet.service + [Service] + ExecStart=/usr/bin/etcdctl exec-watch --recursive /corekube/net -- /opt/bin/kube-net-update.sh + Restart=always + RestartSec=120 + - name: cbr0.netdev + command: start + content: | + [NetDev] + Kind=bridge + Name=cbr0 + - name: cbr0.network + command: start + content: | + [Match] + Name=cbr0 + + [Network] + Address=10.240.INDEX.1/24 + - name: nat.service + command: start + content: | + [Unit] + Description=NAT container->outside traffic + + [Service] + ExecStart=/usr/sbin/iptables -t nat -A POSTROUTING -o eth0 -s 10.240.INDEX.0/24 -j MASQUERADE + ExecStart=/usr/sbin/iptables -t nat -A POSTROUTING -o eth1 -s 10.240.INDEX.0/24 -j MASQUERADE + RemainAfterExit=yes + Type=oneshot + - name: docker.service + command: start + content: | + [Unit] + After=network.target + Description=Docker Application Container Engine + Documentation=http://docs.docker.io + + [Service] + ExecStartPre=/bin/mount --make-rprivate / + ExecStart=/usr/bin/docker -d -s=btrfs -H fd:// -b cbr0 --iptables=false + Restart=always + 
RestartSec=30 + + [Install] + WantedBy=multi-user.target + - name: format-data.service + command: start + content: | + [Unit] + Description=Formats data drive + [Service] + Type=oneshot + RemainAfterExit=yes + ExecStart=/usr/sbin/wipefs -f /dev/xvde1 + ExecStart=/usr/sbin/mkfs.btrfs -f /dev/xvde1 + - name: var-lib-docker-volumes.mount + command: start + content: | + [Unit] + Description=Mount data drive to /var/lib/docker/volumes + Requires=format-data.service + After=format-data.service + Before=docker.service + [Mount] + What=/dev/xvde1 + Where=/var/lib/docker/volumes + Type=btrfs diff --git a/icebox/cluster/rackspace/config-default.sh b/cluster/rackspace/config-default.sh similarity index 84% rename from icebox/cluster/rackspace/config-default.sh rename to cluster/rackspace/config-default.sh index 6d4837a6fc7ee..7fb84a37a466d 100644 --- a/icebox/cluster/rackspace/config-default.sh +++ b/cluster/rackspace/config-default.sh @@ -19,7 +19,7 @@ # KUBE_IMAGE, KUBE_MASTER_FLAVOR, KUBE_MINION_FLAVOR, NUM_MINIONS, NOVA_NETWORK and SSH_KEY_NAME # Shared -KUBE_IMAGE="${KUBE_IMAGE-255df5fb-e3d4-45a3-9a07-c976debf7c14}" # Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM) +KUBE_IMAGE="${KUBE_IMAGE-b63e1435-a46f-4726-b984-e3f15ae92753}" # CoreOS(Beta) SSH_KEY_NAME="${SSH_KEY_NAME-id_kubernetes}" NOVA_NETWORK_LABEL="kubernetes-pool-net" NOVA_NETWORK_CIDR="${NOVA_NETWORK-192.168.0.0/24}" @@ -28,12 +28,12 @@ INSTANCE_PREFIX="kubernetes" # Master KUBE_MASTER_FLAVOR="${KUBE_MASTER_FLAVOR-performance1-1}" MASTER_NAME="${INSTANCE_PREFIX}-master" -MASTER_TAG="tag=${INSTANCE_PREFIX}-master" +MASTER_TAG="tags=${INSTANCE_PREFIX}-master" # Minion -KUBE_MINION_FLAVOR="${KUBE_MINION_FLAVOR-performance1-1}" +KUBE_MINION_FLAVOR="${KUBE_MINION_FLAVOR-performance1-2}" RAX_NUM_MINIONS="${RAX_NUM_MINIONS-4}" -MINION_TAG="tag=${INSTANCE_PREFIX}-minion" +MINION_TAG="tags=${INSTANCE_PREFIX}-minion" MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${RAX_NUM_MINIONS}})) KUBE_NETWORK=($(eval echo 
"10.240.{1..${RAX_NUM_MINIONS}}.0/24")) PORTAL_NET="10.0.0.0/16" diff --git a/icebox/cluster/rackspace/kube-up.sh b/cluster/rackspace/kube-up.sh similarity index 100% rename from icebox/cluster/rackspace/kube-up.sh rename to cluster/rackspace/kube-up.sh diff --git a/icebox/cluster/rackspace/util.sh b/cluster/rackspace/util.sh similarity index 57% rename from icebox/cluster/rackspace/util.sh rename to cluster/rackspace/util.sh index 30a3293894f53..86f898f273875 100644 --- a/icebox/cluster/rackspace/util.sh +++ b/cluster/rackspace/util.sh @@ -18,16 +18,61 @@ # Use the config file specified in $KUBE_CONFIG_FILE, or default to # config-default.sh. +KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. source $(dirname ${BASH_SOURCE})/${KUBE_CONFIG_FILE-"config-default.sh"} verify-prereqs() { # Make sure that prerequisites are installed. - for x in nova; do + for x in nova swiftly; do if [ "$(which $x)" == "" ]; then echo "cluster/rackspace/util.sh: Can't find $x in PATH, please fix and retry." exit 1 fi done + + if [[ -z "${OS_AUTH_URL-}" ]]; then + echo "cluster/rackspace/util.sh: OS_AUTH_URL not set." + echo -e "\texport OS_AUTH_URL=https://identity.api.rackspacecloud.com/v2.0/" + return 1 + fi + + if [[ -z "${OS_USERNAME-}" ]]; then + echo "cluster/rackspace/util.sh: OS_USERNAME not set." + echo -e "\texport OS_USERNAME=myusername" + return 1 + fi + + if [[ -z "${OS_PASSWORD-}" ]]; then + echo "cluster/rackspace/util.sh: OS_PASSWORD not set." + echo -e "\texport OS_PASSWORD=myapikey" + return 1 + fi +} + +# Ensure that we have a password created for validating to the master. Will +# read from $HOME/.kubernetes_auth if available. 
+# +# Vars set: +# KUBE_USER +# KUBE_PASSWORD +get-password() { + local file="$HOME/.kubernetes_auth" + if [[ -r "$file" ]]; then + KUBE_USER=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["User"]') + KUBE_PASSWORD=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["Password"]') + return + fi + KUBE_USER=admin + KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))') + + # Store password for reuse. + cat << EOF > "$file" +{ + "User": "$KUBE_USER", + "Password": "$KUBE_PASSWORD" +} +EOF + chmod 0600 "$file" } rax-ssh-key() { @@ -45,37 +90,72 @@ rax-ssh-key() { fi } -find-object-url() { - if [ -n "$1" ]; then - CONTAINER=$1 - else - local RELEASE_CONFIG_SCRIPT=$(dirname $0)/../release/rackspace/config.sh - if [ -f $(dirname $0)/../release/rackspace/config.sh ]; then - . $RELEASE_CONFIG_SCRIPT - fi +find-release-tars() { + SERVER_BINARY_TAR="${KUBE_ROOT}/server/kubernetes-server-linux-amd64.tar.gz" + RELEASE_DIR="${KUBE_ROOT}/server/" + if [[ ! -f "$SERVER_BINARY_TAR" ]]; then + SERVER_BINARY_TAR="${KUBE_ROOT}/_output/release-tars/kubernetes-server-linux-amd64.tar.gz" + RELEASE_DIR="${KUBE_ROOT}/_output/release-tars/" fi + if [[ ! -f "$SERVER_BINARY_TAR" ]]; then + echo "!!! Cannot find kubernetes-server-linux-amd64.tar.gz" + exit 1 + fi +} + +rackspace-set-vars() { - TEMP_URL=$(swiftly -A ${OS_AUTH_URL} -U ${OS_USERNAME} -K ${OS_PASSWORD} tempurl GET $1/$2) + CLOUDFILES_CONTAINER="kubernetes-releases-${OS_USERNAME}" + CONTAINER_PREFIX=${CONTAINER_PREFIX-devel/} + find-release-tars +} + +# Retrieves a tempurl from cloudfiles to make the release object publicly accessible temporarily. 
+find-object-url() { + + rackspace-set-vars + + KUBE_TAR=${CLOUDFILES_CONTAINER}/${CONTAINER_PREFIX}/kubernetes-server-linux-amd64.tar.gz + + RELEASE_TMP_URL=$(swiftly -A ${OS_AUTH_URL} -U ${OS_USERNAME} -K ${OS_PASSWORD} tempurl GET ${KUBE_TAR}) echo "cluster/rackspace/util.sh: Object temp URL:" - echo -e "\t${TEMP_URL}" + echo -e "\t${RELEASE_TMP_URL}" + +} + +ensure_dev_container() { + SWIFTLY_CMD="swiftly -A ${OS_AUTH_URL} -U ${OS_USERNAME} -K ${OS_PASSWORD}" + + if ! ${SWIFTLY_CMD} get ${CLOUDFILES_CONTAINER} > /dev/null 2>&1 ; then + echo "cluster/rackspace/util.sh: Container doesn't exist. Creating container ${CLOUDFILES_CONTAINER}" + ${SWIFTLY_CMD} put ${CLOUDFILES_CONTAINER} > /dev/null 2>&1 + fi +} + +# Copy kubernetes-server-linux-amd64.tar.gz to cloud files object store +copy_dev_tarballs() { + + echo "cluster/rackspace/util.sh: Uploading to Cloud Files" + ${SWIFTLY_CMD} put -i ${RELEASE_DIR}/kubernetes-server-linux-amd64.tar.gz \ + ${CLOUDFILES_CONTAINER}/${CONTAINER_PREFIX}/kubernetes-server-linux-amd64.tar.gz > /dev/null 2>&1 + + echo "Release pushed." } rax-boot-master() { - ( - echo "#! /bin/bash" - echo "OBJECT_URL=\"${TEMP_URL}\"" - echo "MASTER_HTPASSWD=${HTPASSWD}" - grep -v "^#" $(dirname $0)/templates/download-release.sh - ) > ${KUBE_TEMP}/masterStart.sh + DISCOVERY_URL=$(curl https://discovery.etcd.io/new) + DISCOVERY_ID=$(echo "${DISCOVERY_URL}" | cut -f 4 -d /) + echo "cluster/rackspace/util.sh: etcd discovery URL: ${DISCOVERY_URL}" -# Copy cloud-config to KUBE_TEMP and work some sed magic. Some vars can have -# '/' embedded, so don't use that for sed. 
- sed -e "s|KUBE_MASTER|$MASTER_NAME|g" \ - -e "s|MASTER_HTPASSWD|$HTPASSWD|" \ - -e "s|PORTAL_NET|$PORTAL_NET|" \ - $(dirname $0)/cloud-config/master-cloud-config.yaml > $KUBE_TEMP/master-cloud-config.yaml +# Copy cloud-config to KUBE_TEMP and work some sed magic + sed -e "s|DISCOVERY_ID|${DISCOVERY_ID}|" \ + -e "s|CLOUD_FILES_URL|${RELEASE_TMP_URL//&/\&}|" \ + -e "s|KUBE_USER|${KUBE_USER}|" \ + -e "s|KUBE_PASSWORD|${KUBE_PASSWORD}|" \ + -e "s|PORTAL_NET|${PORTAL_NET}|" \ + $(dirname $0)/rackspace/cloud-config/master-cloud-config.yaml > $KUBE_TEMP/master-cloud-config.yaml MASTER_BOOT_CMD="nova boot \ @@ -83,9 +163,9 @@ rax-boot-master() { --flavor ${KUBE_MASTER_FLAVOR} \ --image ${KUBE_IMAGE} \ --meta ${MASTER_TAG} \ +--meta ETCD=${DISCOVERY_ID} \ --user-data ${KUBE_TEMP}/master-cloud-config.yaml \ --config-drive true \ ---file /root/masterStart.sh=${KUBE_TEMP}/masterStart.sh \ --nic net-id=${NETWORK_UUID} \ ${MASTER_NAME}" @@ -96,28 +176,25 @@ ${MASTER_NAME}" rax-boot-minions() { - cp $(dirname $0)/cloud-config/minion-cloud-config.yaml \ + cp $(dirname $0)/rackspace/cloud-config/minion-cloud-config.yaml \ ${KUBE_TEMP}/minion-cloud-config.yaml for (( i=0; i<${#MINION_NAMES[@]}; i++)); do - ( - echo "#! 
/bin/bash" - echo "MASTER_NAME=${MASTER_IP}" - echo "MINION_IP_RANGE=${KUBE_NETWORK[$i]}" - echo "NUM_MINIONS=${RAX_NUM_MINIONS}" - grep -v "^#" $(dirname $0)/templates/salt-minion.sh - ) > ${KUBE_TEMP}/minionStart${i}.sh + sed -e "s|DISCOVERY_ID|${DISCOVERY_ID}|" \ + -e "s|INDEX|$((i + 1))|g" \ + -e "s|CLOUD_FILES_URL|${RELEASE_TMP_URL//&/\&}|" \ + $(dirname $0)/rackspace/cloud-config/minion-cloud-config.yaml > $KUBE_TEMP/minion-cloud-config-$(($i + 1)).yaml + MINION_BOOT_CMD="nova boot \ --key-name ${SSH_KEY_NAME} \ --flavor ${KUBE_MINION_FLAVOR} \ --image ${KUBE_IMAGE} \ --meta ${MINION_TAG} \ ---user-data ${KUBE_TEMP}/minion-cloud-config.yaml \ +--user-data ${KUBE_TEMP}/minion-cloud-config-$(( i +1 )).yaml \ --config-drive true \ --nic net-id=${NETWORK_UUID} \ ---file=/root/minionStart.sh=${KUBE_TEMP}/minionStart${i}.sh \ ${MINION_NAMES[$i]}" echo "cluster/rackspace/util.sh: Booting ${MINION_NAMES[$i]} with following command:" @@ -171,21 +248,22 @@ detect-master-nova-net() { kube-up() { SCRIPT_DIR=$(CDPATH="" cd $(dirname $0); pwd) - source $(dirname $0)/../gce/util.sh - source $(dirname $0)/util.sh - source $(dirname $0)/../../release/rackspace/config.sh + + rackspace-set-vars + ensure_dev_container + copy_dev_tarballs # Find the release to use. Generally it will be passed when doing a 'prod' # install and will default to the release/config.sh version when doing a # developer up. 
- find-object-url $CONTAINER output/release/$TAR_FILE + find-object-url # Create a temp directory to hold scripts that will be uploaded to master/minions KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX) trap "rm -rf ${KUBE_TEMP}" EXIT get-password - python $(dirname $0)/../../third_party/htpasswd/htpasswd.py -b -c ${KUBE_TEMP}/htpasswd $user $passwd + python $(dirname $0)/../third_party/htpasswd/htpasswd.py -b -c ${KUBE_TEMP}/htpasswd $KUBE_USER $KUBE_PASSWORD HTPASSWD=$(cat ${KUBE_TEMP}/htpasswd) rax-nova-network @@ -197,11 +275,6 @@ kube-up() { echo "cluster/rackspace/util.sh: Starting Cloud Servers" rax-boot-master - # a bit of a hack to wait until master is has an IP from the extra network - echo "cluster/rackspace/util.sh: sleeping 35 seconds" - sleep 35 - - detect-master-nova-net $NOVA_NETWORK_LABEL rax-boot-minions FAIL=0 @@ -224,20 +297,16 @@ kube-up() { echo #This will fail until apiserver salt is updated - until $(curl --insecure --user ${user}:${passwd} --max-time 5 \ + until $(curl --insecure --user ${KUBE_USER}:${KUBE_PASSWORD} --max-time 5 \ --fail --output /dev/null --silent https://${KUBE_MASTER_IP}/api/v1beta1/pods); do printf "." sleep 2 done echo "Kubernetes cluster created." - echo "Sanity checking cluster..." - - sleep 5 # Don't bail on errors, we want to be able to print some info. 
set +e - sleep 45 detect-minions diff --git a/cluster/saltbase/salt/apiserver/default b/cluster/saltbase/salt/apiserver/default index f3a902bbda108..6e7b66f795938 100644 --- a/cluster/saltbase/salt/apiserver/default +++ b/cluster/saltbase/salt/apiserver/default @@ -45,15 +45,6 @@ {% set machines = "-machines=$(echo ${MACHINE_IPS[@]} | xargs -n1 echo | paste -sd,)" %} {% set minion_regexp = "" %} {% endif %} -{%- if grains.cloud == 'rackspace' %} - {%- set ip_addrs = [] %} - {%- for addrs in salt['mine.get']('roles:kubernetes-pool', 'grains.items', expr_form='grain').values() %} - {%- do ip_addrs.append(addrs.ip_interfaces.eth2[0]) %} - {%- endfor %} - MACHINES="{{ip_addrs|join(',')}}" - {%- set machines = "-machines=$MACHINES" %} - {%- set minion_regexp = "" %} -{% endif %} {% endif %} {% if pillar['portal_net'] is defined %} {% set portal_net = "-portal_net=" + pillar['portal_net'] %} diff --git a/docs/getting-started-guides/rackspace.md b/docs/getting-started-guides/rackspace.md index 7e5de3340de0d..352b46ebaf5e7 100644 --- a/docs/getting-started-guides/rackspace.md +++ b/docs/getting-started-guides/rackspace.md @@ -1,37 +1,43 @@ -# WARNING -These instructions are broken at git HEAD. Please either: -* Sync back to `v0.3` with `git checkout v0.3` -* Download a [snapshot of `v0.3`](https://github.com/GoogleCloudPlatform/kubernetes/archive/v0.3.tar.gz) - # Rackspace -In general, the dev-build-and-up.sh workflow for Rackspace is the similar to GCE. The specific implementation is different mainly due to network differences between the providers: +In general, the dev-build-and-up.sh workflow for Rackspace is similar to GCE. The specific implementation is different due to the use of CoreOS, Rackspace Cloud Files and network design. + +These scripts should be used to deploy development environments for Kubernetes. If your account leverages RackConnect or non-standard networking, these scripts will most likely not work without modification. 
+ +NOTE: The rackspace scripts do NOT rely on `saltstack`. + +The current cluster design is inspired by: +- [corekube](https://github.com/metral/corekube/) +- [Angus Lees](https://github.com/anguslees/kube-openstack/) ## Prerequisites 1. You need to have both `nova` and `swiftly` installed. It's recommended to use a python virtualenv to install these packages into. 2. Make sure you have the appropriate environment variables set to interact with the OpenStack APIs. See [Rackspace Documentation](http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/section_gs_install_nova.html) for more details. -3. You can test this by running `nova list` to make sure you're authenticated successfully. ## Provider: Rackspace - To use Rackspace as the provider, set the KUBERNETES_PROVIDER ENV variable: - `export KUBERNETES_PROVIDER=rackspace` and run the `hack/rackspace/dev-build-and-up.sh` script. + `export KUBERNETES_PROVIDER=rackspace` and run the `bash hack/dev-build-and-up.sh` script. -## Release -1. The kubernetes binaries will be built via the common build scripts in `release/`. There is a specific `release/rackspace` directory with scripts for the following steps: +## Build +1. The kubernetes binaries will be built via the common build scripts in `build/`. +2. If you've set the ENV `KUBERNETES_PROVIDER=rackspace`, the scripts will upload `kubernetes-server-linux-amd64.tar.gz` to Cloud Files. 2. A cloud files container will be created via the `swiftly` CLI and a temp URL will be enabled on the object. -3. The built `master-release.tar.gz` will be uploaded to this container and the URL will be passed to master/minions nodes when booted. -- NOTE: RELEASE tagging and launch scripts are not used currently. +3. The built `kubernetes-server-linux-amd64.tar.gz` will be uploaded to this container and the URL will be passed to master/minions nodes when booted. ## Cluster 1. There is a specific `cluster/rackspace` directory with the scripts for the following steps: 2. 
A cloud network will be created and all instances will be attached to this network. We will connect the master API and minion kubelet service via this network. 3. A SSH key will be created and uploaded if needed. This key must be used to ssh into the machines since we won't capture the password. -4. A master will be created via the `nova` CLI. A `cloud-config.yaml` is generated and provided as user-data. A basic `masterStart.sh` will be injected as a file and cloud-init will run it. -5. We sleep for 25 seconds since we need to make sure we can get the IP address of the master on the cloud network we've created to provide the minions as their salt master. -6. We then boot as many minions as defined via `$RAX_NUM_MINIONS`. We pass both a `cloud-config.yaml` as well as a `minionStart.sh`. The latter is executed via cloud-init just like on the master. +4. A master and minions will be created via the `nova` CLI. A `cloud-config.yaml` is generated and provided as user-data with the entire configuration for the systems. +5. We then boot as many minions as defined via `$RAX_NUM_MINIONS`. ## Some notes: - The scripts expect `eth2` to be the cloud network that the containers will communicate across. -- `vxlan` is required on the cloud network interface since cloud networks will filter based on MAC address. This is the workaround for the time being. -- A linux image with a recent kernel `> 13.07` is required for `vxlan`. Ubuntu 14.04 works. - A number of the items in `config-default.sh` are overridable via environment variables. -- routes must be configured on each minion so that containers and kube-proxy are able to locate containers on another system. This is due to the network design in kubernetes and the MAC address limits on Cloud Networks. Static Routes are currently leveraged until we implement a more advanced solution. 
+- For older versions please either: + * Sync back to `v0.3` with `git checkout v0.3` + * Download a [snapshot of `v0.3`](https://github.com/GoogleCloudPlatform/kubernetes/archive/v0.3.tar.gz) + +## Network Design +- eth0 - Public Interface used for servers/containers to reach the internet +- eth1 - ServiceNet - Intra-cluster communication (k8s, etcd, etc) communicate via this interface. The `cloud-config` files use the special CoreOS identifier `$private_ipv4` to configure the services. +- eth2 - Cloud Network - Used for k8s pods to communicate with one another. The proxy service will pass traffic via this interface. diff --git a/icebox/cluster/rackspace/cloud-config/master-cloud-config.yaml b/icebox/cluster/rackspace/cloud-config/master-cloud-config.yaml deleted file mode 100644 index a9b3800eea987..0000000000000 --- a/icebox/cluster/rackspace/cloud-config/master-cloud-config.yaml +++ /dev/null @@ -1,30 +0,0 @@ -#cloud-config - -write_files: -- content: | - grains: - roles: - - kubernetes-master - cloud: rackspace - etcd_servers: KUBE_MASTER - portal_net: PORTAL_NET - path: /etc/salt/minion.d/grains.conf -- content: | - auto_accept: True - path: /etc/salt/master.d/auto-accept.conf -- content: | - reactor: - - 'salt/minion/*/start': - - /srv/reactor/start.sls - path: /etc/salt/master.d/reactor.conf -- content: | - master: KUBE_MASTER - path: /etc/salt/minion.d/master.conf - -runcmd: - - [mkdir, -p, /etc/salt/minion.d] - - [mkdir, -p, /etc/salt/master.d] - - [mkdir, -p, /srv/salt/nginx] - - echo "MASTER_HTPASSWD" > /srv/salt/nginx/htpasswd - - [bash, /root/masterStart.sh] - - curl -L http://bootstrap.saltstack.com | sh -s -- -M -X diff --git a/icebox/cluster/rackspace/cloud-config/minion-cloud-config.yaml b/icebox/cluster/rackspace/cloud-config/minion-cloud-config.yaml deleted file mode 100644 index fab7a7b1abaf0..0000000000000 --- a/icebox/cluster/rackspace/cloud-config/minion-cloud-config.yaml +++ /dev/null @@ -1,5 +0,0 @@ -#cloud-config - -runcmd: - - [mkdir, 
-p, /etc/salt/minion.d] - - [bash, /root/minionStart.sh] diff --git a/icebox/cluster/rackspace/templates/download-release.sh b/icebox/cluster/rackspace/templates/download-release.sh deleted file mode 100644 index 3d036add0e8de..0000000000000 --- a/icebox/cluster/rackspace/templates/download-release.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash - -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Download and install release - -# This script assumes that the environment variable MASTER_RELEASE_TAR contains -# the release tar to download and unpack. It is meant to be pushed to the -# master and run. - -echo "Downloading release ($OBJECT_URL)" -wget $OBJECT_URL -O master-release.tgz - -echo "Unpacking release" -rm -rf master-release || false -tar xzf master-release.tgz - -echo "Running release install script" -sudo master-release/src/scripts/master-release-install.sh diff --git a/icebox/cluster/rackspace/templates/salt-minion.sh b/icebox/cluster/rackspace/templates/salt-minion.sh deleted file mode 100644 index 78daf184c9fcc..0000000000000 --- a/icebox/cluster/rackspace/templates/salt-minion.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/bin/bash - -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Prepopulate the name of the Master -mkdir -p /etc/salt/minion.d -echo master: $MASTER_NAME > /etc/salt/minion.d/master.conf -# Turn on debugging for salt-minion -# echo "DAEMON_ARGS=\"\$DAEMON_ARGS --log-file-level=debug\"" > /etc/default/salt-minion -MINION_IP=$(ip -f inet a sh dev eth2 | awk -F '[ \t/]+' '/inet/ { print $3 }' ) -# Our minions will have a pool role to distinguish them from the master. -cat </etc/salt/minion.d/grains.conf -grains: - roles: - - kubernetes-pool - cbr-cidr: $MINION_IP_RANGE - minion_ip: $MINION_IP - etcd_servers: $MASTER_NAME -EOF -#Move all of this to salt -apt-get update -apt-get install bridge-utils -y -brctl addbr cbr0 -ip l set dev cbr0 up -#for loop to add routes of other minions -for i in `seq 1 $NUM_MINIONS` -do ip r a 10.240.$i.0/24 dev cbr0 -done -ip l a vxlan42 type vxlan id 42 group 239.0.0.42 dev eth2 -brctl addif cbr0 vxlan42 -# Install Salt -# -# We specify -X to avoid a race condition that can cause minion failure to -# install. See https://github.com/saltstack/salt-bootstrap/issues/270 -curl -L http://bootstrap.saltstack.com | sh -s -- -X -ip l set vxlan42 up \ No newline at end of file diff --git a/icebox/release/rackspace/config.sh b/icebox/release/rackspace/config.sh deleted file mode 100644 index 8faf2fb11ffb5..0000000000000 --- a/icebox/release/rackspace/config.sh +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# A set of Cloud Files defaults for which Kubernetes releases will be uploaded to - -# Make sure swiftly is installed and available -if [ "$(which swiftly)" == "" ]; then - echo "release/rackspace/config.sh: Couldn't find swiftly in PATH. Please install swiftly:" - echo -e "\tpip install swiftly" - exit 1 -fi - -CONTAINER="kubernetes-releases-${OS_USERNAME}" - -TAR_FILE=master-release.tgz diff --git a/icebox/release/rackspace/release.sh b/icebox/release/rackspace/release.sh deleted file mode 100755 index 0caac1ce178aa..0000000000000 --- a/icebox/release/rackspace/release.sh +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/bash - -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This script will build and release Kubernetes. -# -# The main parameters to this script come from the config.sh file. This is set -# up by default for development releases. Feel free to edit it or override some -# of the variables there. 
- -# exit on any error -set -e - -SCRIPT_DIR=$(CDPATH="" cd $(dirname $0); pwd) - -source $SCRIPT_DIR/config.sh -KUBE_REPO_ROOT="$(cd "$(dirname "$0")/../../" && pwd -P)" - -source "${KUBE_REPO_ROOT}/cluster/kube-env.sh" -source $SCRIPT_DIR/../../cluster/rackspace/${KUBE_CONFIG_FILE-"config-default.sh"} -source $SCRIPT_DIR/../../cluster/rackspace/util.sh - -$SCRIPT_DIR/../build-release.sh $INSTANCE_PREFIX - -# Copy everything up to swift object store -echo "release/rackspace/release.sh: Uploading to Cloud Files" -if ! swiftly -A $OS_AUTH_URL -U $OS_USERNAME -K $OS_PASSWORD get $CONTAINER > /dev/null 2>&1 ; then - echo "release/rackspace/release.sh: Container doesn't exist. Creating..." - swiftly -A $OS_AUTH_URL -U $OS_USERNAME -K $OS_PASSWORD put $CONTAINER > /dev/null 2>&1 - -fi - -for x in master-release.tgz; do - swiftly -A $OS_AUTH_URL -U $OS_USERNAME -K $OS_PASSWORD put -i _output/release/$x $CONTAINER/output/release/$x > /dev/null 2>&1 -done - -echo "Release pushed."