Commit 2f231e0

Merge pull request kubernetes#4268 from rajatchopra/master

Fix vagrant networking round 2

zmerlynn committed Feb 10, 2015
2 parents e27d534 + 1d12cad
Showing 6 changed files with 89 additions and 77 deletions.
3 changes: 3 additions & 0 deletions cluster/saltbase/salt/kubelet/init.sls
@@ -70,4 +70,7 @@ kubelet:
- file: /etc/init.d/kubelet
{% endif %}
- file: /var/lib/kubelet/kubernetes_auth
+{% if grains.network_mode is defined and grains.network_mode == 'openvswitch' %}
+- sls: sdn
+{% endif %}
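Note: the new require is gated on the minion's network_mode grain, so the sdn state is pulled in ahead of kubelet only on OpenvSwitch-mode nodes. As a quick, illustrative sanity check (standard salt-call usage, not part of this diff), the value the gate sees on a node can be printed with:

salt-call grains.get network_mode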

4 changes: 4 additions & 0 deletions cluster/saltbase/salt/top.sls
@@ -41,6 +41,10 @@ base:
{% if grains['cloud'] is defined and grains['cloud'] == 'azure' %}
- openvpn
{% endif %}
+{% if grains['cloud'] is defined and grains['cloud'] == 'vagrant' %}
+- docker
+- sdn
+{% endif %}

'roles:kubernetes-pool-vsphere':
- match: grain
9 changes: 6 additions & 3 deletions cluster/vagrant/config-default.sh
@@ -29,12 +29,15 @@ export MASTER_NAME="${INSTANCE_PREFIX}-master"
# Map out the IPs, names and container subnets of each minion
export MINION_IP_BASE="10.245.1."
MINION_CONTAINER_SUBNET_BASE="10.246"
+MASTER_CONTAINER_NETMASK="255.255.255.0"
+MASTER_CONTAINER_ADDR="${MINION_CONTAINER_SUBNET_BASE}.0.1"
+MASTER_CONTAINER_SUBNET="${MINION_CONTAINER_SUBNET_BASE}.0.1/24"
CONTAINER_SUBNET="${MINION_CONTAINER_SUBNET_BASE}.0.0/16"
for ((i=0; i < NUM_MINIONS; i++)) do
MINION_IPS[$i]="${MINION_IP_BASE}$((i+3))"
MINION_NAMES[$i]="${INSTANCE_PREFIX}-minion-$((i+1))"
-MINION_CONTAINER_SUBNETS[$i]="${MINION_CONTAINER_SUBNET_BASE}.${i}.1/24"
-MINION_CONTAINER_ADDRS[$i]="${MINION_CONTAINER_SUBNET_BASE}.${i}.1"
+MINION_CONTAINER_SUBNETS[$i]="${MINION_CONTAINER_SUBNET_BASE}.$((i+1)).1/24"
+MINION_CONTAINER_ADDRS[$i]="${MINION_CONTAINER_SUBNET_BASE}.$((i+1)).1"
MINION_CONTAINER_NETMASKS[$i]="255.255.255.0"
VAGRANT_MINION_NAMES[$i]="minion-$((i+1))"
done
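The switch from ${i} to $((i+1)) matters because the master now owns the ".0" subnet: under the old indexing, minion-1's container subnet would have been 10.246.0.1/24, colliding with MASTER_CONTAINER_SUBNET. A minimal runnable sketch of the resulting layout under the stock defaults:

#!/bin/bash
# Sketch only: prints the container-subnet layout produced by the loop above.
MINION_CONTAINER_SUBNET_BASE="10.246"
NUM_MINIONS=2
echo "master: ${MINION_CONTAINER_SUBNET_BASE}.0.1/24"
for ((i=0; i < NUM_MINIONS; i++)) do
echo "minion-$((i+1)): ${MINION_CONTAINER_SUBNET_BASE}.$((i+1)).1/24"
done

With two minions this prints 10.246.0.1/24, 10.246.1.1/24 and 10.246.2.1/24: one /24 per node, no overlap.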
@@ -69,4 +72,4 @@ DNS_REPLICAS=1

# Optional: Enable setting flags for kube-apiserver to turn on behavior in active-dev
RUNTIME_CONFIG=""
#RUNTIME_CONFIG="api/v1beta3"
111 changes: 47 additions & 64 deletions cluster/vagrant/provision-network.sh
@@ -16,63 +16,15 @@

DOCKER_BRIDGE=kbr0
OVS_SWITCH=obr0
-GRE_TUNNEL_BASE=gre
+DOCKER_OVS_TUN=tun0
+TUNNEL_BASE=gre
NETWORK_CONF_PATH=/etc/sysconfig/network-scripts/
POST_NETWORK_SCRIPT_DIR=/kubernetes-vagrant
POST_NETWORK_SCRIPT=${POST_NETWORK_SCRIPT_DIR}/network_closure.sh

# ensure location of POST_NETWORK_SCRIPT exists
mkdir -p $POST_NETWORK_SCRIPT_DIR

-# add docker bridge ifcfg file
-cat <<EOF > ${NETWORK_CONF_PATH}ifcfg-${DOCKER_BRIDGE}
-# Generated by yours truly
-DEVICE=${DOCKER_BRIDGE}
-ONBOOT=yes
-TYPE=Bridge
-BOOTPROTO=static
-IPADDR=${MINION_CONTAINER_ADDR}
-NETMASK=${MINION_CONTAINER_NETMASK}
-STP=yes
-EOF
-
-# add the ovs bridge ifcfg file
-cat <<EOF > ${NETWORK_CONF_PATH}ifcfg-${OVS_SWITCH}
-DEVICE=${OVS_SWITCH}
-ONBOOT=yes
-DEVICETYPE=ovs
-TYPE=OVSBridge
-BOOTPROTO=static
-HOTPLUG=no
-BRIDGE=${DOCKER_BRIDGE}
-EOF
-
-# now loop through all other minions and create persistent gre tunnels
-GRE_NUM=0
-for remote_ip in "${MINION_IPS[@]}"
-do
-if [ "${remote_ip}" == "${MINION_IP}" ]; then
-continue
-fi
-((GRE_NUM++)) || echo
-GRE_TUNNEL=${GRE_TUNNEL_BASE}${GRE_NUM}
-# ovs-vsctl add-port ${OVS_SWITCH} ${GRE_TUNNEL} -- set interface ${GRE_TUNNEL} type=gre options:remote_ip=${remote_ip}
-cat <<EOF > ${NETWORK_CONF_PATH}ifcfg-${GRE_TUNNEL}
-DEVICE=${GRE_TUNNEL}
-ONBOOT=yes
-DEVICETYPE=ovs
-TYPE=OVSTunnel
-OVS_BRIDGE=${OVS_SWITCH}
-OVS_TUNNEL_TYPE=gre
-OVS_TUNNEL_OPTIONS="options:remote_ip=${remote_ip}"
-EOF
-done
-
-# add ip route rules such that all pod traffic flows through docker bridge and consequently to the gre tunnels
-cat <<EOF > ${NETWORK_CONF_PATH}route-${DOCKER_BRIDGE}
-${CONTAINER_SUBNET} dev ${DOCKER_BRIDGE} scope link src ${MINION_CONTAINER_ADDR}
-EOF

# generate the post-configure script to be called by salt as cmd.wait
cat <<EOF > ${POST_NETWORK_SCRIPT}
#!/bin/bash
@@ -81,27 +33,58 @@ set -e
# Only do this operation once, otherwise, we get docker.service files output on disk, and the command line arguments get applied multiple times
grep -q kbr0 /etc/sysconfig/docker || {
+CONTAINER_SUBNETS=(${MASTER_CONTAINER_SUBNET} ${MINION_CONTAINER_SUBNETS[@]})
+CONTAINER_IPS=(${MASTER_IP} ${MINION_IPS[@]})
# Stop docker before making these updates
systemctl stop docker
# NAT interface fails to revive on network restart, so OR-gate to true
systemctl restart network.service || true
-# set docker bridge up, and set stp on the ovs bridge
+# create new docker bridge
+ip link set dev ${DOCKER_BRIDGE} down || true
+brctl delbr ${DOCKER_BRIDGE} || true
+brctl addbr ${DOCKER_BRIDGE}
ip link set dev ${DOCKER_BRIDGE} up
-ovs-vsctl set Bridge ${OVS_SWITCH} stp_enable=true
+ifconfig ${DOCKER_BRIDGE} ${CONTAINER_ADDR} netmask ${CONTAINER_NETMASK} up
+# add ovs bridge
+ovs-vsctl del-br ${OVS_SWITCH} || true
+ovs-vsctl add-br ${OVS_SWITCH} -- set Bridge ${OVS_SWITCH} fail-mode=secure
+ovs-vsctl set bridge ${OVS_SWITCH} protocols=OpenFlow13
+ovs-vsctl del-port ${OVS_SWITCH} ${TUNNEL_BASE}0 || true
+ovs-vsctl add-port ${OVS_SWITCH} ${TUNNEL_BASE}0 -- set Interface ${TUNNEL_BASE}0 type=${TUNNEL_BASE} options:remote_ip="flow" options:key="flow" ofport_request=10
+# add tun device
+ovs-vsctl del-port ${OVS_SWITCH} ${DOCKER_OVS_TUN} || true
+ovs-vsctl add-port ${OVS_SWITCH} ${DOCKER_OVS_TUN} -- set Interface ${DOCKER_OVS_TUN} type=internal ofport_request=9
+brctl addif ${DOCKER_BRIDGE} ${DOCKER_OVS_TUN}
+ip link set ${DOCKER_OVS_TUN} up
+# add oflow rules, because we do not want to use stp
+ovs-ofctl -O OpenFlow13 del-flows ${OVS_SWITCH}
+# now loop through all other minions and create persistent gre tunnels
+NODE_INDEX=0
+for remote_ip in "\${CONTAINER_IPS[@]}"
+do
+if [ "\${remote_ip}" == "${NODE_IP}" ]; then
+ovs-ofctl -O OpenFlow13 add-flow ${OVS_SWITCH} "table=0,ip,in_port=10,nw_dst=\${CONTAINER_SUBNETS[\${NODE_INDEX}]},actions=output:9"
+ovs-ofctl -O OpenFlow13 add-flow ${OVS_SWITCH} "table=0,arp,in_port=10,nw_dst=\${CONTAINER_SUBNETS[\${NODE_INDEX}]},actions=output:9"
+else
+ovs-ofctl -O OpenFlow13 add-flow ${OVS_SWITCH} "table=0,in_port=9,ip,nw_dst=\${CONTAINER_SUBNETS[\${NODE_INDEX}]},actions=set_field:\${remote_ip}->tun_dst,output:10"
+ovs-ofctl -O OpenFlow13 add-flow ${OVS_SWITCH} "table=0,in_port=9,arp,nw_dst=\${CONTAINER_SUBNETS[\${NODE_INDEX}]},actions=set_field:\${remote_ip}->tun_dst,output:10"
+fi
+((NODE_INDEX++)) || true
+done
+# add ip route rules such that all pod traffic flows through docker bridge and consequently to the gre tunnels
+ip route add ${CONTAINER_SUBNET} dev ${DOCKER_BRIDGE} scope link src ${CONTAINER_ADDR}
# modify the docker service file such that it uses the kube docker bridge and not its own
#echo "OPTIONS=-b=kbr0 --iptables=false --selinux-enabled" > /etc/sysconfig/docker
echo "OPTIONS='-b=kbr0 --iptables=false --selinux-enabled ${DOCKER_OPTS}'" >/etc/sysconfig/docker
echo "OPTIONS='-b=kbr0 --selinux-enabled ${DOCKER_OPTS}'" >/etc/sysconfig/docker
systemctl daemon-reload
systemctl restart docker.service
# setup iptables masquerade rules so the pods can reach the internet
iptables -t nat -A POSTROUTING -s ${CONTAINER_SUBNET} ! -d ${CONTAINER_SUBNET} -j MASQUERADE
# persist please
iptables-save >& /etc/sysconfig/iptables
systemctl start docker
}
EOF
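The regenerated closure replaces the per-minion persistent GRE interfaces with a single flow-based tunnel port: ofport 10 is the gre0 tunnel, ofport 9 is the tun0 internal port enslaved to the docker bridge. For each node subnet, traffic arriving over the tunnel for the local subnet is handed to tun0, while locally originated traffic for a remote subnet gets its tunnel destination (tun_dst) set to that peer's IP before going out the tunnel port. As a sketch of the result (assuming the stock Vagrant addressing and a master at 10.245.1.2, which this diff does not show), the loop would program flows like these on minion-1; matching arp flows are installed the same way:

# Sketch only: IP flows the loop above would install on minion-1
# (NODE_IP=10.245.1.3, local pod subnet 10.246.1.1/24).
ovs-ofctl -O OpenFlow13 add-flow obr0 "table=0,ip,in_port=10,nw_dst=10.246.1.1/24,actions=output:9"
ovs-ofctl -O OpenFlow13 add-flow obr0 "table=0,in_port=9,ip,nw_dst=10.246.0.1/24,actions=set_field:10.245.1.2->tun_dst,output:10"
ovs-ofctl -O OpenFlow13 add-flow obr0 "table=0,in_port=9,ip,nw_dst=10.246.2.1/24,actions=set_field:10.245.1.4->tun_dst,output:10"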

26 changes: 23 additions & 3 deletions cluster/vagrant/util.sh
@@ -69,6 +69,13 @@ function create-provision-scripts {
echo "MASTER_IP='${MASTER_IP}'"
echo "MINION_NAMES=(${MINION_NAMES[@]})"
echo "MINION_IPS=(${MINION_IPS[@]})"
echo "NODE_IP='${MASTER_IP}'"
echo "CONTAINER_SUBNET='${CONTAINER_SUBNET}'"
echo "CONTAINER_NETMASK='${MASTER_CONTAINER_NETMASK}'"
echo "MASTER_CONTAINER_SUBNET='${MASTER_CONTAINER_SUBNET}'"
echo "CONTAINER_ADDR='${MASTER_CONTAINER_ADDR}'"
echo "MINION_CONTAINER_NETMASKS='${MINION_CONTAINER_NETMASKS[@]}'"
echo "MINION_CONTAINER_SUBNETS=(${MINION_CONTAINER_SUBNETS[@]})"
echo "PORTAL_NET='${PORTAL_NET}'"
echo "MASTER_USER='${MASTER_USER}'"
echo "MASTER_PASSWD='${MASTER_PASSWD}'"
@@ -80,6 +87,7 @@ function create-provision-scripts {
echo "DNS_DOMAIN='${DNS_DOMAIN:-}'"
echo "RUNTIME_CONFIG='${RUNTIME_CONFIG:-}'"
grep -v "^#" "${KUBE_ROOT}/cluster/vagrant/provision-master.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/vagrant/provision-network.sh"
) > "${KUBE_TEMP}/master-start.sh"

for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
@@ -91,8 +99,11 @@
echo "MINION_IPS=(${MINION_IPS[@]})"
echo "MINION_IP='${MINION_IPS[$i]}'"
echo "MINION_ID='$i'"
echo "MINION_CONTAINER_ADDR='${MINION_CONTAINER_ADDRS[$i]}'"
echo "MINION_CONTAINER_NETMASK='${MINION_CONTAINER_NETMASKS[$i]}'"
echo "NODE_IP='${MINION_IPS[$i]}'"
echo "MASTER_CONTAINER_SUBNET='${MASTER_CONTAINER_SUBNET}'"
echo "CONTAINER_ADDR='${MINION_CONTAINER_ADDRS[$i]}'"
echo "CONTAINER_NETMASK='${MINION_CONTAINER_NETMASKS[$i]}'"
echo "MINION_CONTAINER_SUBNETS=(${MINION_CONTAINER_SUBNETS[@]})"
echo "CONTAINER_SUBNET='${CONTAINER_SUBNET}'"
echo "DOCKER_OPTS='${EXTRA_DOCKER_OPTS-}'"
grep -v "^#" "${KUBE_ROOT}/cluster/vagrant/provision-minion.sh"
@@ -257,6 +268,10 @@ function find-vagrant-name-by-ip {
# Find the vagrant machine name based on the host name of the minion
function find-vagrant-name-by-minion-name {
local ip="$1"
if [[ "$ip" == "${INSTANCE_PREFIX}-master" ]]; then
echo "master"
return $?
fi
local ip_pattern="${INSTANCE_PREFIX}-minion-(.*)"

[[ $ip =~ $ip_pattern ]] || {
@@ -280,14 +295,19 @@ function ssh-to-node {
return 1
}

-vagrant ssh "${machine}" -c "${cmd}" | grep -v "Connection to.*closed"
+vagrant ssh "${machine}" -c "${cmd}"
}

# Restart the kube-proxy on a node ($1)
function restart-kube-proxy {
ssh-to-node "$1" "sudo systemctl restart kube-proxy"
}

+# Restart the apiserver
+function restart-apiserver {
+ssh-to-node "${master}" "sudo systemctl restart kube-apiserver"
+}

function setup-monitoring-firewall {
echo "TODO" 1>&2
}
13 changes: 6 additions & 7 deletions hack/e2e-suite/services.sh
@@ -33,11 +33,6 @@ source "${KUBE_VERSION_ROOT}/cluster/${KUBERNETES_PROVIDER}/util.sh"

prepare-e2e

if [[ "$KUBERNETES_PROVIDER" == "vagrant" ]]; then
echo "WARNING: Skipping services.sh for ${KUBERNETES_PROVIDER}. See https://github.com/GoogleCloudPlatform/kubernetes/issues/3655"
exit 0
fi

function error() {
echo "$@" >&2
exit 1
@@ -266,7 +261,7 @@ function verify_from_container() {
for i in $(seq -s' ' 1 $4); do
ok=false
for j in $(seq -s' ' 1 10); do
-if wget -q -T 1 -O - http://$2:$3; then
+if wget -q -T 5 -O - http://$2:$3; then
echo
ok=true
break
@@ -420,7 +415,11 @@ verify_from_container "${svc3_name}" "${svc3_ip}" "${svc3_port}" \
#
echo "Test 6: Restart the master, make sure portals come back."
echo "Restarting the master"
ssh-to-node "${master}" "sudo /etc/init.d/kube-apiserver restart"
if [[ "$KUBERNETES_PROVIDER" == "vagrant" ]]; then
restart-apiserver "${master}"
else
ssh-to-node "${master}" "sudo /etc/init.d/kube-apiserver restart"
fi
sleep 5
echo "Verifying the portals from the host"
wait_for_service_up "${svc3_name}" "${svc3_ip}" "${svc3_port}" \
