diff --git a/cluster/rackspace/cloud-config/minion-cloud-config.yaml b/cluster/rackspace/cloud-config/minion-cloud-config.yaml
index 3d4b574068305..d1bab0db3f002 100644
--- a/cluster/rackspace/cloud-config/minion-cloud-config.yaml
+++ b/cluster/rackspace/cloud-config/minion-cloud-config.yaml
@@ -83,6 +83,10 @@ coreos:
     peer-addr: $private_ipv4:7001
     peer-bind-addr: $private_ipv4:7001
+  flannel:
+    ip_masq: true
+    interface: eth2
+
   fleet:
     public-ip: $private_ipv4
     metadata: kubernetes_role=minion

@@ -96,20 +100,16 @@ coreos:
     - name: fleet.service
       command: start
     - name: flanneld.service
-      command: start
       drop-ins:
-        - name: 50-network-config.conf
-          content: |
-            [Service]
-            ExecStartPre=/usr/bin/etcdctl set /coreos.com/network/config '{ "Network": "KUBE_NETWORK", "Backend": {"Type": "host-gw"}}'
-            ExecStart=
-            ExecStart=/usr/libexec/sdnotify-proxy /run/flannel/sd.sock \
-            /usr/bin/docker run --net=host --privileged=true --rm \
-            --volume=/run/flannel:/run/flannel \
-            --env=NOTIFY_SOCKET=/run/flannel/sd.sock \
-            --env-file=/run/flannel/options.env \
-            --volume=${ETCD_SSL_DIR}:/etc/ssl/etcd:ro \
-            quay.io/coreos/flannel:${FLANNEL_VER} /opt/bin/flanneld -etcd-endpoints http://127.0.0.1:4001 --ip-masq=true --iface=eth2
+        - name: 50-flannel.conf
+          content: |
+            [Unit]
+            Requires=etcd.service
+            After=etcd.service
+
+            [Service]
+            ExecStartPre=-/usr/bin/etcdctl mk /coreos.com/network/config '{"Network":"KUBE_NETWORK", "Backend": {"Type": "host-gw"}}'
+      command: start
     - name: docker.service
       command: start
       drop-ins:
@@ -120,6 +120,9 @@ coreos:
             # won't land in flannel's network...
             Requires=flanneld.service
             After=flanneld.service
+
+            [Service]
+            Restart=always
     - name: download-release.service
       command: start
       content: |
diff --git a/cluster/rackspace/util.sh b/cluster/rackspace/util.sh
index 5929627cd7ad6..ca254eb09b269 100644
--- a/cluster/rackspace/util.sh
+++ b/cluster/rackspace/util.sh
@@ -321,7 +321,8 @@ kube-up() {
   detect-master

   # TODO look for a better way to get the known_tokens to the master. This is needed over file injection since the files were too large on a 4 node cluster.
-  $(scp -o StrictHostKeyChecking=no -i ~/.ssh/${SSH_KEY_NAME} ${KUBE_TEMP}/known_tokens.csv core@${KUBE_MASTER_IP}:known_tokens.csv)
+  $(scp -o StrictHostKeyChecking=no -i ~/.ssh/${SSH_KEY_NAME} ${KUBE_TEMP}/known_tokens.csv core@${KUBE_MASTER_IP}:/home/core/known_tokens.csv)
+  $(sleep 2)
   $(ssh -o StrictHostKeyChecking=no -i ~/.ssh/${SSH_KEY_NAME} core@${KUBE_MASTER_IP} sudo mv /home/core/known_tokens.csv /var/lib/kube-apiserver/known_tokens.csv)
   $(ssh -o StrictHostKeyChecking=no -i ~/.ssh/${SSH_KEY_NAME} core@${KUBE_MASTER_IP} sudo chown root.root /var/lib/kube-apiserver/known_tokens.csv)
   $(ssh -o StrictHostKeyChecking=no -i ~/.ssh/${SSH_KEY_NAME} core@${KUBE_MASTER_IP} sudo systemctl restart kube-apiserver)
diff --git a/docs/getting-started-guides/rackspace.md b/docs/getting-started-guides/rackspace.md
index 7cdf423c3c550..4ddbd748b2679 100644
--- a/docs/getting-started-guides/rackspace.md
+++ b/docs/getting-started-guides/rackspace.md
@@ -1,7 +1,6 @@
 # Rackspace

-* Supported Version: v0.16.2
-  * `git checkout v0.16.2`
+* Supported Version: v0.18.1

 In general, the dev-build-and-up.sh workflow for Rackspace is the similar to GCE. The specific implementation is different due to the use of CoreOS, Rackspace Cloud Files and the overall network design.

@@ -20,8 +19,9 @@ The current cluster design is inspired by:

 ##Provider: Rackspace

-- To install the latest released version of kubernetes use `export KUBERNETES_PROVIDER=rackspace; wget -q -O - https://get.k8s.io | bash`
 - To build your own released version from source use `export KUBERNETES_PROVIDER=rackspace` and run the `bash hack/dev-build-and-up.sh`
+- Note: The get.k8s.io install method is not working yet for our scripts.
+  * To install the latest released version of kubernetes use `export KUBERNETES_PROVIDER=rackspace; wget -q -O - https://get.k8s.io | bash`

 ## Build
 1. The kubernetes binaries will be built via the common build scripts in `build/`.