cidr-fix for vsphere cloud provider
let kubelet configure the cbr0 bridge.
let kube-controller-manager distribute the subnets across the nodes.
create routes for the pod network.
Dhawal Yogesh Bhanushali committed Mar 2, 2016
1 parent c9bd9e9 commit fe7568d
Showing 7 changed files with 49 additions and 14 deletions.
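For orientation, the address plan this commit moves to is: pods on the master draw from MASTER_IP_RANGE, while pods on the nodes draw from per-node subnets that kube-controller-manager carves out of the single NODE_IP_RANGES cluster CIDR. A minimal sketch using the defaults from config-default.sh below; the per-node /24s and node names are illustrative, since the controller manager picks the actual assignments at runtime:

    # Illustrative address plan only -- per-node subnets are assigned by
    # kube-controller-manager at runtime, not fixed ahead of time.
    NODE_IP_RANGES="10.244.0.0/16"      # cluster CIDR for node pods
    MASTER_IP_RANGE="10.246.0.0/24"     # cbr0 range on the master
    # e.g. with NUM_NODES=3 the controller manager might hand out:
    #   kubernetes-minion-1 -> 10.244.0.0/24
    #   kubernetes-minion-2 -> 10.244.1.0/24
    #   kubernetes-minion-3 -> 10.244.2.0/24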
4 changes: 0 additions & 4 deletions cluster/saltbase/salt/top.sls
@@ -80,7 +80,3 @@ base:
{% if pillar.get('network_provider', '').lower() == 'opencontrail' %}
- opencontrail-networking-master
{% endif %}

'roles:kubernetes-pool-vsphere':
- match: grain
- static-routes
3 changes: 2 additions & 1 deletion cluster/vsphere/config-default.sh
@@ -27,7 +27,8 @@ MASTER_MEMORY_MB=1024
MASTER_CPU=1

NODE_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_NODES}}))
NODE_IP_RANGES=($(eval echo "10.244.{1..${NUM_NODES}}.0/24"))
NODE_IP_RANGES="10.244.0.0/16"
MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
NODE_MEMORY_MB=2048
NODE_CPU=1

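Note the semantic shift in NODE_IP_RANGES above: it used to be expanded into a fixed /24 per node at deploy time, and is now a single cluster-wide CIDR with the per-node split left to Kubernetes. A quick sketch of what the old line produced, assuming NUM_NODES=2:

    # Old behaviour: a fixed array of per-node subnets, computed up front.
    NUM_NODES=2
    OLD_RANGES=($(eval echo "10.244.{1..${NUM_NODES}}.0/24"))
    echo "${OLD_RANGES[@]}"    # -> 10.244.1.0/24 10.244.2.0/24

    # New behaviour: one cluster CIDR; kube-controller-manager allocates
    # each node's subnet from it at runtime.
    NODE_IP_RANGES="10.244.0.0/16"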
3 changes: 2 additions & 1 deletion cluster/vsphere/config-test.sh
@@ -27,7 +27,8 @@ MASTER_MEMORY_MB=1024
MASTER_CPU=1

NODE_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_NODES}}))
NODE_IP_RANGES=($(eval echo "10.244.{1..${NUM_NODES}}.0/24"))
NODE_IP_RANGES="10.244.0.0/16"
MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
NODE_MEMORY_MB=1024
NODE_CPU=1

3 changes: 2 additions & 1 deletion cluster/vsphere/templates/create-dynamic-salt-files.sh
@@ -121,7 +121,8 @@ dns_replicas: ${DNS_REPLICAS:-1}
dns_server: $DNS_SERVER_IP
dns_domain: $DNS_DOMAIN
e2e_storage_test_environment: "${E2E_STORAGE_TEST_ENVIRONMENT:-false}"
cluster_cidr: "$NODE_IP_RANGES"
allocate_node_cidrs: "${ALLOCATE_NODE_CIDRS:-true}"
EOF

mkdir -p /srv/salt-overlay/salt/nginx
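The two new pillar values above are what drive the allocation behaviour; in a typical setup they end up on the kube-controller-manager command line. A rough sketch of the correspondence (the exact wiring lives in the salt templates, which this diff does not touch):

    # Sketch only: how these pillar values commonly surface as
    # kube-controller-manager flags.
    #   cluster_cidr        -> --cluster-cidr
    #   allocate_node_cidrs -> --allocate-node-cidrs
    kube-controller-manager \
      --cloud-provider=vsphere \
      --allocate-node-cidrs=true \
      --cluster-cidr=10.244.0.0/16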
1 change: 1 addition & 0 deletions cluster/vsphere/templates/salt-master.sh
@@ -25,6 +25,7 @@ cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
  roles:
    - kubernetes-master
  cbr-cidr: $MASTER_IP_RANGE
  cloud: vsphere
EOF

1 change: 0 additions & 1 deletion cluster/vsphere/templates/salt-minion.sh
@@ -41,7 +41,6 @@ grains:
  roles:
    - kubernetes-pool
    - kubernetes-pool-vsphere
  cbr-cidr: $NODE_IP_RANGE
  cloud: vsphere
EOF

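With the static cbr-cidr grain removed, a node's pod subnet now comes from the API server (the controller manager records it on the Node object) instead of from salt. Two hedged checks for verifying this on a running cluster; the node name, ssh user, and values shown are illustrative:

    # Which subnet did the controller manager assign to this node?
    kubectl get node kubernetes-minion-1 -o yaml | grep podCIDR
    #   podCIDR: 10.244.1.0/24

    # Did the kubelet configure cbr0 to match?
    ssh kube@kubernetes-minion-1 'ip route show dev cbr0'
    #   10.244.1.0/24  proto kernel  scope link  src 10.244.1.1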
48 changes: 42 additions & 6 deletions cluster/vsphere/util.sh
@@ -262,9 +262,6 @@ function kube-check {
done
}




#
# verify if salt master is up. check 30 times and then echo out bad output and return 0
#
@@ -306,6 +303,41 @@ function remote-pgrep {
done
}

# identify the pod subnets and route them to each other.
#
# Assumptions:
#  All packages have been installed and kubelet has started running.
#
function setup-pod-routes {
  # wait till the kubelet sets up the bridge.
  echo "Setting up routes"
  for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
    printf "check if cbr0 bridge is ready on ${NODE_NAMES[$i]}\n"
    kube-check ${KUBE_NODE_IP_ADDRESSES[$i]} 'sudo ifconfig cbr0 | grep -oP "inet addr:\K\S+"'
  done


  # identify the subnet assigned to the node by the kubernetes controller manager.
  KUBE_NODE_BRIDGE_NETWORK=()
  for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
    printf " finding network of cbr0 bridge on node ${NODE_NAMES[$i]}\n"
    network=$(kube-ssh ${KUBE_NODE_IP_ADDRESSES[$i]} 'sudo ip route show | grep -E "dev cbr0" | cut -d " " -f1')
    KUBE_NODE_BRIDGE_NETWORK+=("${network}")
  done


  # make the pods visible to each other.
  local j
  for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
    printf "setting up routes for ${NODE_NAMES[$i]}"
    for (( j=0; j<${#NODE_NAMES[@]}; j++)); do
      if [[ $i != $j ]]; then
        kube-ssh ${KUBE_NODE_IP_ADDRESSES[$i]} "sudo route add -net ${KUBE_NODE_BRIDGE_NETWORK[$j]} gw ${KUBE_NODE_IP_ADDRESSES[$j]}"
      fi
    done
  done
}
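To make the effect concrete, here is roughly what the last loop issues for a hypothetical two-node cluster (node names, IPs, and bridge subnets are made up for illustration):

    # Hypothetical inputs:
    #   NODE_NAMES=(minion-1 minion-2)
    #   KUBE_NODE_IP_ADDRESSES=(10.245.1.3 10.245.1.4)
    #   KUBE_NODE_BRIDGE_NETWORK=(10.244.1.0/24 10.244.2.0/24)
    #
    # Commands run over ssh:
    #   on minion-1:  sudo route add -net 10.244.2.0/24 gw 10.245.1.4
    #   on minion-2:  sudo route add -net 10.244.1.0/24 gw 10.245.1.3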

# Instantiate a kubernetes cluster
#
# Assumed vars:
@@ -331,8 +363,10 @@ function kube-up {
grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/hostname.sh"
echo "cd /home/kube/cache/kubernetes-install"
echo "readonly MASTER_NAME='${MASTER_NAME}'"
echo "readonly MASTER_IP_RANGE='${MASTER_IP_RANGE}'"
echo "readonly INSTANCE_PREFIX='${INSTANCE_PREFIX}'"
echo "readonly NODE_INSTANCE_PREFIX='${INSTANCE_PREFIX}-node'"
echo "readonly NODE_IP_RANGES='${NODE_IP_RANGES}'"
echo "readonly SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'"
echo "readonly ENABLE_NODE_LOGGING='${ENABLE_NODE_LOGGING:-false}'"
echo "readonly LOGGING_DESTINATION='${LOGGING_DESTINATION:-}'"
@@ -365,7 +399,7 @@ function kube-up {
grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/hostname.sh"
echo "KUBE_MASTER=${KUBE_MASTER}"
echo "KUBE_MASTER_IP=${KUBE_MASTER_IP}"
echo "NODE_IP_RANGE=${NODE_IP_RANGES[$i]}"
echo "NODE_IP_RANGE=$NODE_IP_RANGES"
grep -v "^#" "${KUBE_ROOT}/cluster/vsphere/templates/salt-minion.sh"
) > "${KUBE_TEMP}/node-start-${i}.sh"

@@ -427,8 +461,10 @@ function kube-up {
done
printf " OK\n"
done
echo "Kubernetes cluster created."

setup-pod-routes

echo "Kubernetes cluster created."
# TODO use token instead of basic auth
export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt"
export KUBE_KEY="/tmp/$RANDOM-kubecfg.key"
@@ -444,14 +480,14 @@ function kube-up {

create-kubeconfig
)
printf "\n"

echo
echo "Sanity checking cluster..."

sleep 5

# Basic sanity checking
local i
for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
# Make sure docker is installed
kube-ssh "${KUBE_NODE_IP_ADDRESSES[$i]}" which docker > /dev/null || {
