From a8bae68865a5f87e5ba52fe6a331714940d48587 Mon Sep 17 00:00:00 2001 From: Ryan Richard Date: Sat, 11 Oct 2014 19:32:53 -0500 Subject: [PATCH] Rackspace - Switch to CoreOS for standard cluster The Rackspace setup for Kubernetes now leverages CoreOS instead of Ubuntu. We've dropped Salt completely for our cluster. --- README.md | 3 +- build/common.sh | 65 +++++ build/release.sh | 3 +- .../cloud-config/master-cloud-config.yaml | 153 ++++++++++++ .../cloud-config/minion-cloud-config.yaml | 224 ++++++++++++++++++ .../rackspace/config-default.sh | 8 +- .../cluster => cluster}/rackspace/kube-up.sh | 0 {icebox/cluster => cluster}/rackspace/util.sh | 113 +++++---- cluster/saltbase/salt/apiserver/default | 9 - docs/getting-started-guides/rackspace.md | 26 +- .../cloud-config/master-cloud-config.yaml | 30 --- .../cloud-config/minion-cloud-config.yaml | 5 - .../rackspace/templates/download-release.sh | 31 --- .../rackspace/templates/salt-minion.sh | 48 ---- icebox/release/rackspace/config.sh | 26 -- 15 files changed, 529 insertions(+), 215 deletions(-) create mode 100644 cluster/rackspace/cloud-config/master-cloud-config.yaml create mode 100644 cluster/rackspace/cloud-config/minion-cloud-config.yaml rename {icebox/cluster => cluster}/rackspace/config-default.sh (84%) rename {icebox/cluster => cluster}/rackspace/kube-up.sh (100%) rename {icebox/cluster => cluster}/rackspace/util.sh (70%) delete mode 100644 icebox/cluster/rackspace/cloud-config/master-cloud-config.yaml delete mode 100644 icebox/cluster/rackspace/cloud-config/minion-cloud-config.yaml delete mode 100644 icebox/cluster/rackspace/templates/download-release.sh delete mode 100644 icebox/cluster/rackspace/templates/salt-minion.sh delete mode 100644 icebox/release/rackspace/config.sh diff --git a/README.md b/README.md index 424015b90159e..ea99ed58ae9ad 100644 --- a/README.md +++ b/README.md @@ -23,11 +23,12 @@ While the concepts and architecture in Kubernetes represent years of experience * 
[CoreOS](docs/getting-started-guides/coreos.md) * [OpenStack](https://developer.rackspace.com/blog/running-coreos-and-kubernetes/) * [CloudStack](docs/getting-started-guides/cloudstack.md) + * [Rackspace](docs/getting-started-guides/rackspace.md) + * The following clouds are currently broken at Kubernetes head. Please sync your client to `v0.3` (`git checkout v0.3`) to use these: * [Locally](docs/getting-started-guides/locally.md) * [vSphere](docs/getting-started-guides/vsphere.md) * [Microsoft Azure](docs/getting-started-guides/azure.md) - * [Rackspace](docs/getting-started-guides/rackspace.md) * [Kubernetes 101](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/examples/walkthrough) * [kubecfg command line tool](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/cli.md) * [Kubernetes API Documentation](http://cdn.rawgit.com/GoogleCloudPlatform/kubernetes/31a0daae3627c91bc96e1f02a6344cd76e294791/api/kubernetes.html) diff --git a/build/common.sh b/build/common.sh index 15a9189f8b6ff..f849ba4a534bc 100644 --- a/build/common.sh +++ b/build/common.sh @@ -663,3 +663,68 @@ function kube::release::gcs::copy_release_tarballs() { gsutil ls -lh "${gcs_destination}" } + +# --------------------------------------------------------------------------- +# Rackspace Release + +function kube::release::rackspace::release() { + + [[ ${KUBE_RACKSPACE_UPLOAD_RELEASE-y} =~ ^[yY]$ ]] || return 0 + + CLOUDFILES_CONTAINER="kubernetes-releases-${OS_USERNAME}" + + kube::release::rackspace::verify_prereqs + kube::release::rackspace::ensure_release_container + kube::release::rackspace::copy_release_tarballs +} + +# Verify things are set up for uploading to Cloud Files +function kube::release::rackspace::verify_prereqs() { + + # Make sure swiftly is installed and available + if [[ -z "$(which swiftly)" ]]; then + echo "build/common.sh: Couldn't find swiftly in PATH. 
Please install swiftly:" + echo -e "\tpip install swiftly" + return 1 + fi + + if [[ -z "${OS_AUTH_URL-}" ]]; then + echo "build/common.sh: OS_AUTH_URL not set." + echo -e "\texport OS_AUTH_URL=https://identity.api.rackspacecloud.com/v2.0/" + return 1 + fi + + if [[ -z "${OS_USERNAME-}" ]]; then + echo "build/common.sh: OS_USERNAME not set." + echo -e "\texport OS_USERNAME=myusername" + return 1 + fi + + if [[ -z "${OS_PASSWORD-}" ]]; then + echo "build/common.sh: OS_PASSWORD not set." + echo -e "\texport OS_PASSWORD=myapikey" + return 1 + fi +} + +function kube::release::rackspace::ensure_release_container() { + + KUBE_RACKSPACE_RELEASE_BUCKET=${KUBE_RACKSPACE_RELEASE_BUCKET-kubernetes-releases-${OS_USERNAME}} + KUBE_RACKSPACE_RELEASE_PREFIX=${KUBE_RACKSPACE_RELEASE_PREFIX-devel/} + + SWIFTLY_CMD="swiftly -A ${OS_AUTH_URL} -U ${OS_USERNAME} -K ${OS_PASSWORD}" + + if ! ${SWIFTLY_CMD} get ${CLOUDFILES_CONTAINER} > /dev/null 2>&1 ; then + echo "build/common.sh: Container doesn't exist. Creating container ${CLOUDFILES_CONTAINER}" + ${SWIFTLY_CMD} put ${CLOUDFILES_CONTAINER} > /dev/null 2>&1 + fi +} + +function kube::release::rackspace::copy_release_tarballs() { + + # Copy release tar.gz to cloud files object store + echo "build/common.sh: Uploading to Cloud Files" + ${SWIFTLY_CMD} put -i ${RELEASE_DIR}/kubernetes-server-linux-amd64.tar.gz ${CLOUDFILES_CONTAINER}/devel/kubernetes-server-linux-amd64.tar.gz > /dev/null 2>&1 + + echo "Release pushed." 
+} diff --git a/build/release.sh b/build/release.sh index 5ab7ae2db53fb..d1e25548b7288 100755 --- a/build/release.sh +++ b/build/release.sh @@ -40,4 +40,5 @@ fi kube::build::copy_output kube::build::run_image kube::release::package_tarballs -kube::release::gcs::release + +kube::release::${KUBERNETES_PROVIDER}::release diff --git a/cluster/rackspace/cloud-config/master-cloud-config.yaml b/cluster/rackspace/cloud-config/master-cloud-config.yaml new file mode 100644 index 0000000000000..5fe30a190150f --- /dev/null +++ b/cluster/rackspace/cloud-config/master-cloud-config.yaml @@ -0,0 +1,153 @@ +#cloud-config + +write_files: + - path: /opt/bin/regen-minion-list.sh + permissions: 0755 + content: | + #!/bin/sh + m=$(echo $(etcdctl ls --recursive /corekube/minions | cut -d/ -f4 | sort) | tr ' ' ,) + echo "Found $m" + mkdir -p /run/apiserver + echo "MINIONS=$m" > /run/apiserver/minions.env + - path: /opt/bin/git-kubernetes-nginx.sh + permissions: 0755 + content: | + #!/bin/bash + git clone https://github.com/doublerr/kubernetes_nginx /opt/kubernetes_nginx + /usr/bin/cp /opt/.kubernetes_auth /opt/kubernetes_nginx/.kubernetes_auth + docker build -t kubernetes_nginx:latest /opt/kubernetes_nginx + - path: /opt/bin/download-release.sh + permissions: 0755 + content: | + #!/bin/bash + OBJECT_URL="CLOUD_FILES_URL" + echo "Downloading release ($OBJECT_URL)" + wget $OBJECT_URL -O /opt/kubernetes.tar.gz + echo "Unpacking release" + rm -rf /opt/kubernetes || false + tar xzf /opt/kubernetes.tar.gz -C /opt/ + - path: /opt/.kubernetes_auth + permissions: 0600 + content: | + KUBE_USER:KUBE_PASSWORD + +coreos: + etcd: + name: kubernetes-master + discovery: https://discovery.etcd.io/DISCOVERY_ID + addr: $private_ipv4:4001 + peer-addr: $private_ipv4:7001 + peer-bind-addr: $private_ipv4:7001 + + fleet: + public-ip: $private_ipv4 + metadata: kubernetes_role=master + + update: + reboot-strategy: etcd-lock + + units: + #- name: nova-agent-watcher.service + # command: try-restart + - name: 
etcd.service + command: start + - name: fleet.service + command: start + - name: download-release.service + command: start + content: | + [Unit] + Description=Downloads Kubernetes Release + After=network-online.target + Requires=network-online.target + [Service] + Type=oneshot + RemainAfterExit=yes + ExecStart=/usr/bin/bash /opt/bin/download-release.sh + - name: master-apiserver.service + command: start + content: | + [Unit] + Description=Kubernetes API Server + Documentation=https://github.com/GoogleCloudPlatform/kubernetes + After=network-online.target + Requires=network-online.target + After=minion-finder.service + Requires=minion-finder.service + After=download-release.service + Requires=download-release.service + [Service] + EnvironmentFile=-/run/apiserver/minions.env + ExecStartPre=/usr/bin/ln -sf /opt/kubernetes/platforms/linux/amd64/apiserver /opt/bin/apiserver + ExecStart=/opt/bin/apiserver --address=127.0.0.1 --port=8080 --machines=${MINIONS} --etcd_servers=http://127.0.0.1:4001 --logtostderr=true + Restart=always + RestartSec=2 + - name: master-apiserver-sighup.path + command: start + content: | + [Path] + PathChanged=/run/apiserver/minions.env + - name: master-apiserver-sighup.service + command: start + content: | + [Service] + ExecStart=/usr/bin/pkill -SIGHUP -f apiserver + - name: minion-finder.service + command: start + content: | + [Unit] + Description=Kubernetes Minion finder + After=network-online.target + Requires=network-online.target + After=etcd.service + Requires=etcd.service + [Service] + ExecStartPre=/opt/bin/regen-minion-list.sh + ExecStart=/usr/bin/etcdctl exec-watch --recursive /corekube/minions -- /opt/bin/regen-minion-list.sh + Restart=always + RestartSec=30 + - name: master-controller-manager.service + command: start + content: | + [Unit] + Description=Kubernetes Controller Manager + Documentation=https://github.com/GoogleCloudPlatform/kubernetes + After=network-online.target + Requires=network-online.target + 
After=master-apiserver.service + Requires=master-apiserver.service + [Service] + ExecStartPre=/usr/bin/ln -sf /opt/kubernetes/platforms/linux/amd64/controller-manager /opt/bin/controller-manager + ExecStart=/opt/bin/controller-manager --master=127.0.0.1:8080 --logtostderr=true + Restart=always + RestartSec=2 + - name: master-scheduler.service + command: start + content: | + [Unit] + Description=Kubernetes Scheduler + Documentation=https://github.com/GoogleCloudPlatform/kubernetes + After=network-online.target + Requires=network-online.target + After=master-apiserver.service + Requires=master-apiserver.service + [Service] + ExecStartPre=/usr/bin/ln -sf /opt/kubernetes/platforms/linux/amd64/scheduler /opt/bin/scheduler + ExecStart=/opt/bin/scheduler --master=127.0.0.1:8080 --logtostderr=true + Restart=always + RestartSec=10 + - name: kubernetes-nginx.service + command: start + content: | + [Unit] + Description=Kubernetes Nginx Service + After=network-online.target + Requires=network-online.target + After=docker.service + Requires=docker.service + [Service] + ExecStartPre=/opt/bin/git-kubernetes-nginx.sh + ExecStart=/usr/bin/docker run --rm --net="host" -p "443:443" -t --name "kubernetes_nginx" kubernetes_nginx + ExecStop=/usr/bin/docker stop kubernetes_nginx + Restart=always + RestartSec=15 diff --git a/cluster/rackspace/cloud-config/minion-cloud-config.yaml b/cluster/rackspace/cloud-config/minion-cloud-config.yaml new file mode 100644 index 0000000000000..33e4e77cb43e4 --- /dev/null +++ b/cluster/rackspace/cloud-config/minion-cloud-config.yaml @@ -0,0 +1,224 @@ +#cloud-config + +write_files: + - path: /opt/bin/kube-net-update.sh + permissions: 0755 + content: | + #!/bin/sh + set -x -e + nh=${ETCD_WATCH_KEY##*/} + net=$ETCD_WATCH_VALUE + case $ETCD_WATCH_ACTION in + set) ip route replace $net via $nh dev eth2 metric 900 ;; + expire) ip route del $net via $nh metric 900 ;; + esac + - path: /opt/bin/download-release.sh + permissions: 0755 + content: | + #!/bin/bash + 
OBJECT_URL="http://storage.googleapis.com/kubernetes-releases-56726/devel/kubernetes.tar.gz" + echo "Downloading release ($OBJECT_URL)" + wget $OBJECT_URL -O /opt/kubernetes.tar.gz + echo "Unpacking release" + rm -rf /opt/kubernetes || false + tar xzf /opt/kubernetes.tar.gz -C /opt/ + - path: /opt/kubernetes-manifests/cadvisor.manifest + permissions: 0755 + content: | + version: v1beta2 + id: cadvisor-agent + containers: + - name: cadvisor + image: google/cadvisor:latest + ports: + - name: http + containerPort: 8080 + hostPort: 4194 + volumeMounts: + - name: varrun + mountPath: /var/run + readOnly: false + - name: varlibdocker + mountPath: /var/lib/docker + readOnly: true + - name: cgroups + mountPath: /sys/fs/cgroup + readOnly: true + volumes: + - name: varrun + source: + hostDir: + path: /var/run + - name: varlibdocker + source: + hostDir: + path: /var/lib/docker + - name: cgroups + source: + hostDir: + path: /sys/fs/cgroup + +coreos: + etcd: + name: kubernetes-minion-INDEX + discovery: https://discovery.etcd.io/DISCOVERY_ID + addr: $private_ipv4:4001 + peer-addr: $private_ipv4:7001 + peer-bind-addr: $private_ipv4:7001 + + fleet: + public-ip: $private_ipv4 + metadata: kubernetes_role=minion + + update: + reboot-strategy: etcd-lock + + units: + - name: etcd.service + command: start + - name: fleet.service + command: start + - name: download-release.service + command: start + content: | + [Unit] + Description=Downloads Kubernetes Release + After=network-online.target + Requires=network-online.target + [Service] + Type=oneshot + RemainAfterExit=yes + ExecStart=/usr/bin/bash /opt/bin/download-release.sh + - name: minion-kubelet.service + command: start + content: | + [Unit] + Description=Kubernetes Kubelet + Documentation=https://github.com/GoogleCloudPlatform/kubernetes + After=network-online.target + Requires=network-online.target + After=docker.service + Requires=docker.service + After=download-release.service + Requires=download-release.service + [Service] + 
ExecStartPre=/usr/bin/ln -sf /opt/kubernetes/platforms/linux/amd64/kubelet /opt/bin/kubelet + ExecStart=/opt/bin/kubelet --address=$private_ipv4 --hostname_override=$private_ipv4 --etcd_servers=http://127.0.0.1:4001 --logtostderr=true --config=/opt/kubernetes-manifests + Restart=always + RestartSec=2 + - name: minion-proxy.service + command: start + content: | + [Unit] + Description=Kubernetes Proxy + Documentation=https://github.com/GoogleCloudPlatform/kubernetes + After=network-online.target + Requires=network-online.target + After=docker.service + Requires=docker.service + After=download-release.service + Requires=download-release.service + [Service] + ExecStartPre=/usr/bin/ln -sf /opt/kubernetes/platforms/linux/amd64/proxy /opt/bin/proxy + ExecStart=/opt/bin/proxy --bind_address=$private_ipv4 --etcd_servers=http://127.0.0.1:4001 --logtostderr=true + Restart=always + RestartSec=2 + - name: minion-advertiser.service + command: start + content: | + [Unit] + Description=Kubernetes Minion Advertiser + After=etcd.service + Requires=etcd.service + After=minion-kubelet.service + [Service] + ExecStart=/bin/sh -c 'while :; do etcdctl set /corekube/minions/$private_ipv4 $private_ipv4 --ttl 300; sleep 120; done' + Restart=always + RestartSec=120 + - name: net-advertiser.service + command: start + content: | + [Unit] + Description=Kubernetes Network Advertiser + After=etcd.service + Requires=etcd.service + After=minion-kubelet.service + [Service] + ExecStart=/bin/sh -c 'eth2_ip=$$(ip -o -f inet a show dev eth2 | sed "s/.* inet \([0-9.]\+\).*/\1/"); while :; do etcdctl set /corekube/net/$$eth2_ip 10.240.INDEX.0/24 --ttl 300; sleep 120; done' + Restart=always + RestartSec=120 + - name: net-router.service + command: start + content: | + [Unit] + Description=Kubernetes Network Router + After=etcd.service + Requires=etcd.service + After=minion-kubelet.service + [Service] + ExecStart=/usr/bin/etcdctl exec-watch --recursive /corekube/net -- /opt/bin/kube-net-update.sh + 
Restart=always + RestartSec=120 + - name: cbr0.netdev + command: start + content: | + [NetDev] + Kind=bridge + Name=cbr0 + - name: cbr0.network + command: start + content: | + [Match] + Name=cbr0 + + [Network] + Address=10.240.INDEX.1/24 + - name: nat.service + command: start + content: | + [Unit] + Description=NAT container->outside traffic + + [Service] + ExecStart=/usr/sbin/iptables -t nat -A POSTROUTING -o eth0 -s 10.240.INDEX.0/24 -j MASQUERADE + ExecStart=/usr/sbin/iptables -t nat -A POSTROUTING -o eth1 -s 10.240.INDEX.0/24 -j MASQUERADE + RemainAfterExit=yes + Type=oneshot + - name: docker.service + command: start + content: | + [Unit] + After=network.target + Description=Docker Application Container Engine + Documentation=http://docs.docker.io + + [Service] + ExecStartPre=/bin/mount --make-rprivate / + ExecStart=/usr/bin/docker -d -s=btrfs -H fd:// -b cbr0 --iptables=false + Restart=always + RestartSec=30 + + [Install] + WantedBy=multi-user.target + - name: format-data.service + command: start + content: | + [Unit] + Description=Formats data drive + [Service] + Type=oneshot + RemainAfterExit=yes + ExecStart=/usr/sbin/wipefs -f /dev/xvde1 + ExecStart=/usr/sbin/mkfs.btrfs -f /dev/xvde1 + - name: var-lib-docker-volumes.mount + command: start + content: | + [Unit] + Description=Mount data drive to /var/lib/docker/volumes + Requires=format-data.service + After=format-data.service + Before=docker.service + [Mount] + What=/dev/xvde1 + Where=/var/lib/docker/volumes + Type=btrfs diff --git a/icebox/cluster/rackspace/config-default.sh b/cluster/rackspace/config-default.sh similarity index 84% rename from icebox/cluster/rackspace/config-default.sh rename to cluster/rackspace/config-default.sh index 6d4837a6fc7ee..7fb84a37a466d 100644 --- a/icebox/cluster/rackspace/config-default.sh +++ b/cluster/rackspace/config-default.sh @@ -19,7 +19,7 @@ # KUBE_IMAGE, KUBE_MASTER_FLAVOR, KUBE_MINION_FLAVOR, NUM_MINIONS, NOVA_NETWORK and SSH_KEY_NAME # Shared 
-KUBE_IMAGE="${KUBE_IMAGE-255df5fb-e3d4-45a3-9a07-c976debf7c14}" # Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM) +KUBE_IMAGE="${KUBE_IMAGE-b63e1435-a46f-4726-b984-e3f15ae92753}" # CoreOS(Beta) SSH_KEY_NAME="${SSH_KEY_NAME-id_kubernetes}" NOVA_NETWORK_LABEL="kubernetes-pool-net" NOVA_NETWORK_CIDR="${NOVA_NETWORK-192.168.0.0/24}" @@ -28,12 +28,12 @@ INSTANCE_PREFIX="kubernetes" # Master KUBE_MASTER_FLAVOR="${KUBE_MASTER_FLAVOR-performance1-1}" MASTER_NAME="${INSTANCE_PREFIX}-master" -MASTER_TAG="tag=${INSTANCE_PREFIX}-master" +MASTER_TAG="tags=${INSTANCE_PREFIX}-master" # Minion -KUBE_MINION_FLAVOR="${KUBE_MINION_FLAVOR-performance1-1}" +KUBE_MINION_FLAVOR="${KUBE_MINION_FLAVOR-performance1-2}" RAX_NUM_MINIONS="${RAX_NUM_MINIONS-4}" -MINION_TAG="tag=${INSTANCE_PREFIX}-minion" +MINION_TAG="tags=${INSTANCE_PREFIX}-minion" MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${RAX_NUM_MINIONS}})) KUBE_NETWORK=($(eval echo "10.240.{1..${RAX_NUM_MINIONS}}.0/24")) PORTAL_NET="10.0.0.0/16" diff --git a/icebox/cluster/rackspace/kube-up.sh b/cluster/rackspace/kube-up.sh similarity index 100% rename from icebox/cluster/rackspace/kube-up.sh rename to cluster/rackspace/kube-up.sh diff --git a/icebox/cluster/rackspace/util.sh b/cluster/rackspace/util.sh similarity index 70% rename from icebox/cluster/rackspace/util.sh rename to cluster/rackspace/util.sh index 30a3293894f53..60ea0738161b3 100644 --- a/icebox/cluster/rackspace/util.sh +++ b/cluster/rackspace/util.sh @@ -18,6 +18,7 @@ # Use the config file specified in $KUBE_CONFIG_FILE, or default to # config-default.sh. +KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. source $(dirname ${BASH_SOURCE})/${KUBE_CONFIG_FILE-"config-default.sh"} verify-prereqs() { @@ -30,6 +31,32 @@ verify-prereqs() { done } +# Ensure that we have a password created for validating to the master. Will +# read from $HOME/.kubernetes_auth if available. 
+# +# Vars set: +# KUBE_USER +# KUBE_PASSWORD +get-password() { + local file="$HOME/.kubernetes_auth" + if [[ -r "$file" ]]; then + KUBE_USER=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["User"]') + KUBE_PASSWORD=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["Password"]') + return + fi + KUBE_USER=admin + KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))') + + # Store password for reuse. + cat << EOF > "$file" +{ + "User": "$KUBE_USER", + "Password": "$KUBE_PASSWORD" +} +EOF + chmod 0600 "$file" +} + rax-ssh-key() { if [ ! -f $HOME/.ssh/${SSH_KEY_NAME} ]; then echo "cluster/rackspace/util.sh: Generating SSH KEY ${HOME}/.ssh/${SSH_KEY_NAME}" @@ -45,17 +72,22 @@ rax-ssh-key() { fi } -find-object-url() { - if [ -n "$1" ]; then - CONTAINER=$1 - else - local RELEASE_CONFIG_SCRIPT=$(dirname $0)/../release/rackspace/config.sh - if [ -f $(dirname $0)/../release/rackspace/config.sh ]; then - . $RELEASE_CONFIG_SCRIPT - fi +find-release-tars() { + SERVER_BINARY_TAR="${KUBE_ROOT}/server/kubernetes-server-linux-amd64.tar.gz" + if [[ ! -f "$SERVER_BINARY_TAR" ]]; then + SERVER_BINARY_TAR="${KUBE_ROOT}/_output/release-tars/kubernetes-server-linux-amd64.tar.gz" + fi + if [[ ! -f "$SERVER_BINARY_TAR" ]]; then + echo "!!! Cannot find kubernetes-server-linux-amd64.tar.gz" + exit 1 fi +} + +find-object-url() { - TEMP_URL=$(swiftly -A ${OS_AUTH_URL} -U ${OS_USERNAME} -K ${OS_PASSWORD} tempurl GET $1/$2) + RELEASE=kubernetes-releases-${OS_USERNAME}/devel/kubernetes-server-linux-amd64.tar.gz + + TEMP_URL=$(swiftly -A ${OS_AUTH_URL} -U ${OS_USERNAME} -K ${OS_PASSWORD} tempurl GET $RELEASE) echo "cluster/rackspace/util.sh: Object temp URL:" echo -e "\t${TEMP_URL}" @@ -63,19 +95,20 @@ find-object-url() { rax-boot-master() { - ( - echo "#! 
/bin/bash" - echo "OBJECT_URL=\"${TEMP_URL}\"" - echo "MASTER_HTPASSWD=${HTPASSWD}" - grep -v "^#" $(dirname $0)/templates/download-release.sh - ) > ${KUBE_TEMP}/masterStart.sh + DISCOVERY_URL=$(curl https://discovery.etcd.io/new) + DISCOVERY_ID=$(echo "${DISCOVERY_URL}" | cut -f 4 -d /) + echo "cluster/rackspace/util.sh: etcd discovery URL: ${DISCOVERY_URL}" + + get-password + find-object-url -# Copy cloud-config to KUBE_TEMP and work some sed magic. Some vars can have -# '/' embedded, so don't use that for sed. - sed -e "s|KUBE_MASTER|$MASTER_NAME|g" \ - -e "s|MASTER_HTPASSWD|$HTPASSWD|" \ - -e "s|PORTAL_NET|$PORTAL_NET|" \ - $(dirname $0)/cloud-config/master-cloud-config.yaml > $KUBE_TEMP/master-cloud-config.yaml +# Copy cloud-config to KUBE_TEMP and work some sed magic + sed -e "s|DISCOVERY_ID|${DISCOVERY_ID}|" \ + -e "s|CLOUD_FILES_URL|${TEMP_URL}|" \ + -e "s|KUBE_USER|${KUBE_USER}|" \ + -e "s|KUBE_PASSWORD|${KUBE_PASSWORD}|" \ + -e "s|PORTAL_NET|${PORTAL_NET}|" \ + $(dirname $0)/rackspace/cloud-config/master-cloud-config.yaml > $KUBE_TEMP/master-cloud-config.yaml MASTER_BOOT_CMD="nova boot \ @@ -83,9 +116,9 @@ rax-boot-master() { --flavor ${KUBE_MASTER_FLAVOR} \ --image ${KUBE_IMAGE} \ --meta ${MASTER_TAG} \ +--meta ETCD=${DISCOVERY_ID} \ --user-data ${KUBE_TEMP}/master-cloud-config.yaml \ --config-drive true \ ---file /root/masterStart.sh=${KUBE_TEMP}/masterStart.sh \ --nic net-id=${NETWORK_UUID} \ ${MASTER_NAME}" @@ -96,28 +129,25 @@ ${MASTER_NAME}" rax-boot-minions() { - cp $(dirname $0)/cloud-config/minion-cloud-config.yaml \ + cp $(dirname $0)/rackspace/cloud-config/minion-cloud-config.yaml \ ${KUBE_TEMP}/minion-cloud-config.yaml for (( i=0; i<${#MINION_NAMES[@]}; i++)); do - ( - echo "#! 
/bin/bash" - echo "MASTER_NAME=${MASTER_IP}" - echo "MINION_IP_RANGE=${KUBE_NETWORK[$i]}" - echo "NUM_MINIONS=${RAX_NUM_MINIONS}" - grep -v "^#" $(dirname $0)/templates/salt-minion.sh - ) > ${KUBE_TEMP}/minionStart${i}.sh + sed -e "s|DISCOVERY_ID|${DISCOVERY_ID}|" \ + -e "s|INDEX|$((i + 1))|g" \ + -e "s|CLOUD_FILES_URL|${TEMP_URL}|" \ + $(dirname $0)/rackspace/cloud-config/minion-cloud-config.yaml > $KUBE_TEMP/minion-cloud-config-$(($i + 1)).yaml + MINION_BOOT_CMD="nova boot \ --key-name ${SSH_KEY_NAME} \ --flavor ${KUBE_MINION_FLAVOR} \ --image ${KUBE_IMAGE} \ --meta ${MINION_TAG} \ ---user-data ${KUBE_TEMP}/minion-cloud-config.yaml \ +--user-data ${KUBE_TEMP}/minion-cloud-config-$(( i +1 )).yaml \ --config-drive true \ --nic net-id=${NETWORK_UUID} \ ---file=/root/minionStart.sh=${KUBE_TEMP}/minionStart${i}.sh \ ${MINION_NAMES[$i]}" echo "cluster/rackspace/util.sh: Booting ${MINION_NAMES[$i]} with following command:" @@ -171,21 +201,17 @@ detect-master-nova-net() { kube-up() { SCRIPT_DIR=$(CDPATH="" cd $(dirname $0); pwd) - source $(dirname $0)/../gce/util.sh - source $(dirname $0)/util.sh - source $(dirname $0)/../../release/rackspace/config.sh - # Find the release to use. Generally it will be passed when doing a 'prod' # install and will default to the release/config.sh version when doing a # developer up. 
- find-object-url $CONTAINER output/release/$TAR_FILE + #find-object-url $CONTAINER output/release/$TAR_FILE # Create a temp directory to hold scripts that will be uploaded to master/minions KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX) trap "rm -rf ${KUBE_TEMP}" EXIT get-password - python $(dirname $0)/../../third_party/htpasswd/htpasswd.py -b -c ${KUBE_TEMP}/htpasswd $user $passwd + python $(dirname $0)/../third_party/htpasswd/htpasswd.py -b -c ${KUBE_TEMP}/htpasswd $KUBE_USER $KUBE_PASSWORD HTPASSWD=$(cat ${KUBE_TEMP}/htpasswd) rax-nova-network @@ -197,11 +223,6 @@ kube-up() { echo "cluster/rackspace/util.sh: Starting Cloud Servers" rax-boot-master - # a bit of a hack to wait until master is has an IP from the extra network - echo "cluster/rackspace/util.sh: sleeping 35 seconds" - sleep 35 - - detect-master-nova-net $NOVA_NETWORK_LABEL rax-boot-minions FAIL=0 @@ -224,20 +245,16 @@ kube-up() { echo #This will fail until apiserver salt is updated - until $(curl --insecure --user ${user}:${passwd} --max-time 5 \ + until $(curl --insecure --user ${KUBE_USER}:${KUBE_PASSWORD} --max-time 5 \ --fail --output /dev/null --silent https://${KUBE_MASTER_IP}/api/v1beta1/pods); do printf "." sleep 2 done echo "Kubernetes cluster created." - echo "Sanity checking cluster..." - - sleep 5 # Don't bail on errors, we want to be able to print some info. 
set +e - sleep 45 detect-minions diff --git a/cluster/saltbase/salt/apiserver/default b/cluster/saltbase/salt/apiserver/default index f3a902bbda108..6e7b66f795938 100644 --- a/cluster/saltbase/salt/apiserver/default +++ b/cluster/saltbase/salt/apiserver/default @@ -45,15 +45,6 @@ {% set machines = "-machines=$(echo ${MACHINE_IPS[@]} | xargs -n1 echo | paste -sd,)" %} {% set minion_regexp = "" %} {% endif %} -{%- if grains.cloud == 'rackspace' %} - {%- set ip_addrs = [] %} - {%- for addrs in salt['mine.get']('roles:kubernetes-pool', 'grains.items', expr_form='grain').values() %} - {%- do ip_addrs.append(addrs.ip_interfaces.eth2[0]) %} - {%- endfor %} - MACHINES="{{ip_addrs|join(',')}}" - {%- set machines = "-machines=$MACHINES" %} - {%- set minion_regexp = "" %} -{% endif %} {% endif %} {% if pillar['portal_net'] is defined %} {% set portal_net = "-portal_net=" + pillar['portal_net'] %} diff --git a/docs/getting-started-guides/rackspace.md b/docs/getting-started-guides/rackspace.md index 7e5de3340de0d..67f9643f3054d 100644 --- a/docs/getting-started-guides/rackspace.md +++ b/docs/getting-started-guides/rackspace.md @@ -1,10 +1,16 @@ -# WARNING -These instructions are broken at git HEAD. Please either: +# Rackspace +In general, the dev-build-and-up.sh workflow for Rackspace is similar to GCE. The specific implementation is different due to the use of CoreOS and network design. + + +NOTE: The rackspace scripts do NOT rely on saltstack. + +For older versions please either: * Sync back to `v0.3` with `git checkout v0.3` * Download a [snapshot of `v0.3`](https://github.com/GoogleCloudPlatform/kubernetes/archive/v0.3.tar.gz) -# Rackspace -In general, the dev-build-and-up.sh workflow for Rackspace is the similar to GCE. 
The specific implementation is different mainly due to network differences between the providers: +The current cluster design is inspired by: +- [corekube](https://github.com/metral/corekube/) +- [Angus Lees](https://github.com/anguslees/kube-openstack/) ## Prerequisites 1. You need to have both `nova` and `swiftly` installed. It's recommended to use a python virtualenv to install these packages into. @@ -13,9 +19,9 @@ In general, the dev-build-and-up.sh workflow for Rackspace is the similar to GCE ## Provider: Rackspace - To use Rackspace as the provider, set the KUBERNETES_PROVIDER ENV variable: - `export KUBERNETES_PROVIDER=rackspace` and run the `hack/rackspace/dev-build-and-up.sh` script. + `export KUBERNETES_PROVIDER=rackspace` and run the `bash cluster/kube-up.sh` script. -## Release +## Build 1. The kubernetes binaries will be built via the common build scripts in `release/`. There is a specific `release/rackspace` directory with scripts for the following steps: 2. A cloud files container will be created via the `swiftly` CLI and a temp URL will be enabled on the object. 3. The built `master-release.tar.gz` will be uploaded to this container and the URL will be passed to master/minions nodes when booted. @@ -25,13 +31,9 @@ In general, the dev-build-and-up.sh workflow for Rackspace is the similar to GCE 1. There is a specific `cluster/rackspace` directory with the scripts for the following steps: 2. A cloud network will be created and all instances will be attached to this network. We will connect the master API and minion kubelet service via this network. 3. A SSH key will be created and uploaded if needed. This key must be used to ssh into the machines since we won't capture the password. -4. A master will be created via the `nova` CLI. A `cloud-config.yaml` is generated and provided as user-data. A basic `masterStart.sh` will be injected as a file and cloud-init will run it. -5. 
We sleep for 25 seconds since we need to make sure we can get the IP address of the master on the cloud network we've created to provide the minions as their salt master. -6. We then boot as many minions as defined via `$RAX_NUM_MINIONS`. We pass both a `cloud-config.yaml` as well as a `minionStart.sh`. The latter is executed via cloud-init just like on the master. +4. A master and minions will be created via the `nova` CLI. A `cloud-config.yaml` is generated and provided as user-data with the entire configuration for the systems. +5. We then boot as many minions as defined via `$RAX_NUM_MINIONS`. ## Some notes: - The scripts expect `eth2` to be the cloud network that the containers will communicate across. -- `vxlan` is required on the cloud network interface since cloud networks will filter based on MAC address. This is the workaround for the time being. -- A linux image with a recent kernel `> 13.07` is required for `vxlan`. Ubuntu 14.04 works. - A number of the items in `config-default.sh` are overridable via environment variables. -- routes must be configured on each minion so that containers and kube-proxy are able to locate containers on another system. This is due to the network design in kubernetes and the MAC address limits on Cloud Networks. Static Routes are currently leveraged until we implement a more advanced solution. 
diff --git a/icebox/cluster/rackspace/cloud-config/master-cloud-config.yaml b/icebox/cluster/rackspace/cloud-config/master-cloud-config.yaml deleted file mode 100644 index a9b3800eea987..0000000000000 --- a/icebox/cluster/rackspace/cloud-config/master-cloud-config.yaml +++ /dev/null @@ -1,30 +0,0 @@ -#cloud-config - -write_files: -- content: | - grains: - roles: - - kubernetes-master - cloud: rackspace - etcd_servers: KUBE_MASTER - portal_net: PORTAL_NET - path: /etc/salt/minion.d/grains.conf -- content: | - auto_accept: True - path: /etc/salt/master.d/auto-accept.conf -- content: | - reactor: - - 'salt/minion/*/start': - - /srv/reactor/start.sls - path: /etc/salt/master.d/reactor.conf -- content: | - master: KUBE_MASTER - path: /etc/salt/minion.d/master.conf - -runcmd: - - [mkdir, -p, /etc/salt/minion.d] - - [mkdir, -p, /etc/salt/master.d] - - [mkdir, -p, /srv/salt/nginx] - - echo "MASTER_HTPASSWD" > /srv/salt/nginx/htpasswd - - [bash, /root/masterStart.sh] - - curl -L http://bootstrap.saltstack.com | sh -s -- -M -X diff --git a/icebox/cluster/rackspace/cloud-config/minion-cloud-config.yaml b/icebox/cluster/rackspace/cloud-config/minion-cloud-config.yaml deleted file mode 100644 index fab7a7b1abaf0..0000000000000 --- a/icebox/cluster/rackspace/cloud-config/minion-cloud-config.yaml +++ /dev/null @@ -1,5 +0,0 @@ -#cloud-config - -runcmd: - - [mkdir, -p, /etc/salt/minion.d] - - [bash, /root/minionStart.sh] diff --git a/icebox/cluster/rackspace/templates/download-release.sh b/icebox/cluster/rackspace/templates/download-release.sh deleted file mode 100644 index 3d036add0e8de..0000000000000 --- a/icebox/cluster/rackspace/templates/download-release.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash - -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Download and install release - -# This script assumes that the environment variable MASTER_RELEASE_TAR contains -# the release tar to download and unpack. It is meant to be pushed to the -# master and run. - -echo "Downloading release ($OBJECT_URL)" -wget $OBJECT_URL -O master-release.tgz - -echo "Unpacking release" -rm -rf master-release || false -tar xzf master-release.tgz - -echo "Running release install script" -sudo master-release/src/scripts/master-release-install.sh diff --git a/icebox/cluster/rackspace/templates/salt-minion.sh b/icebox/cluster/rackspace/templates/salt-minion.sh deleted file mode 100644 index 78daf184c9fcc..0000000000000 --- a/icebox/cluster/rackspace/templates/salt-minion.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/bin/bash - -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# Prepopulate the name of the Master -mkdir -p /etc/salt/minion.d -echo master: $MASTER_NAME > /etc/salt/minion.d/master.conf -# Turn on debugging for salt-minion -# echo "DAEMON_ARGS=\"\$DAEMON_ARGS --log-file-level=debug\"" > /etc/default/salt-minion -MINION_IP=$(ip -f inet a sh dev eth2 | awk -F '[ \t/]+' '/inet/ { print $3 }' ) -# Our minions will have a pool role to distinguish them from the master. -cat </etc/salt/minion.d/grains.conf -grains: - roles: - - kubernetes-pool - cbr-cidr: $MINION_IP_RANGE - minion_ip: $MINION_IP - etcd_servers: $MASTER_NAME -EOF -#Move all of this to salt -apt-get update -apt-get install bridge-utils -y -brctl addbr cbr0 -ip l set dev cbr0 up -#for loop to add routes of other minions -for i in `seq 1 $NUM_MINIONS` -do ip r a 10.240.$i.0/24 dev cbr0 -done -ip l a vxlan42 type vxlan id 42 group 239.0.0.42 dev eth2 -brctl addif cbr0 vxlan42 -# Install Salt -# -# We specify -X to avoid a race condition that can cause minion failure to -# install. See https://github.com/saltstack/salt-bootstrap/issues/270 -curl -L http://bootstrap.saltstack.com | sh -s -- -X -ip l set vxlan42 up \ No newline at end of file diff --git a/icebox/release/rackspace/config.sh b/icebox/release/rackspace/config.sh deleted file mode 100644 index 8faf2fb11ffb5..0000000000000 --- a/icebox/release/rackspace/config.sh +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright 2014 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# A set of Cloud Files defaults for which Kubernetes releases will be uploaded to - -# Make sure swiftly is installed and available -if [ "$(which swiftly)" == "" ]; then - echo "release/rackspace/config.sh: Couldn't find swiftly in PATH. Please install swiftly:" - echo -e "\tpip install swiftly" - exit 1 -fi - -CONTAINER="kubernetes-releases-${OS_USERNAME}" - -TAR_FILE=master-release.tgz