Skip to content

Commit

Permalink
Upgrade to Fedora 21, Docker 1.6, clean-up SDN
Browse files Browse the repository at this point in the history
  • Loading branch information
derekwaynecarr committed Jun 4, 2015
1 parent a161edb commit 2168cee
Show file tree
Hide file tree
Showing 11 changed files with 147 additions and 104 deletions.
6 changes: 2 additions & 4 deletions Vagrantfile
Original file line number Diff line number Diff line change
Expand Up @@ -59,8 +59,8 @@ $kube_provider_boxes = {
},
:virtualbox => {
'fedora' => {
:box_name => 'kube-fedora20',
:box_url => 'http://opscode-vm-bento.s3.amazonaws.com/vagrant/virtualbox/opscode_fedora-20_chef-provisionerless.box'
:box_name => 'kube-fedora21',
:box_url => 'http://opscode-vm-bento.s3.amazonaws.com/vagrant/virtualbox/opscode_fedora-21_chef-provisionerless.box'
}
},
:libvirt => {
Expand Down Expand Up @@ -211,7 +211,6 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
c.vm.provision "shell", run: "always", path: script
end
c.vm.network "private_network", ip: "#{$master_ip}"
c.vm.hostname = ENV['MASTER_NAME']
end

# Kubernetes minion
Expand All @@ -229,7 +228,6 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
minion.vm.provision "shell", run: "always", path: script
end
minion.vm.network "private_network", ip: "#{minion_ip}"
minion.vm.hostname = minion_hostname
end
end
end
8 changes: 8 additions & 0 deletions cluster/saltbase/salt/docker/default
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
{% set docker_opts = "" -%}
{% if grains.docker_opts is defined -%}
{% set docker_opts = grains.docker_opts -%}
{% endif -%}

DOCKER_OPTS='{{docker_opts}}'
OPTIONS='{{docker_opts}}'
DOCKER_CERT_PATH=/etc/docker
13 changes: 13 additions & 0 deletions cluster/saltbase/salt/docker/init.sls
Original file line number Diff line number Diff line change
Expand Up @@ -8,15 +8,28 @@ bridge-utils:
pkg.installed

{% if grains.os_family == 'RedHat' %}

docker-io:
pkg:
- installed

{{ environment_file }}:
file.managed:
- source: salt://docker/default
- template: jinja
- user: root
- group: root
- mode: 644
- makedirs: true

docker:
service.running:
- enable: True
- require:
- pkg: docker-io
- watch:
- file: {{ environment_file }}
- pkg: docker-io

{% else %}

Expand Down
3 changes: 2 additions & 1 deletion cluster/saltbase/salt/kubelet/default
Original file line number Diff line number Diff line change
Expand Up @@ -26,8 +26,9 @@
# the debugging handlers (/run and /exec) to prevent arbitrary code execution on
# the master.
# TODO(roberthbailey): Make this configurable via an env var in config-default.sh

{% set debugging_handlers = "--enable-debugging-handlers=true" -%}
{% if grains.cloud == 'gce' -%}
{% if grains.cloud in ['gce', 'vagrant'] -%}
{% if grains['roles'][0] == 'kubernetes-master' -%}
{% set api_servers_with_port = "" -%}
{% set debugging_handlers = "--enable-debugging-handlers=false" -%}
Expand Down
14 changes: 0 additions & 14 deletions cluster/saltbase/salt/sdn/init.sls

This file was deleted.

3 changes: 0 additions & 3 deletions cluster/saltbase/salt/top.sls
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,6 @@ base:
- docker
{% if grains['cloud'] is defined and grains['cloud'] == 'azure' %}
- openvpn-client
{% else %}
- sdn
{% endif %}
- helpers
- cadvisor
Expand Down Expand Up @@ -50,7 +48,6 @@ base:
{% if grains['cloud'] is defined and grains['cloud'] == 'vagrant' %}
- docker
- kubelet
- sdn
{% endif %}
{% if grains['cloud'] is defined and grains['cloud'] == 'aws' %}
- docker
Expand Down
7 changes: 5 additions & 2 deletions cluster/vagrant/config-default.sh
Original file line number Diff line number Diff line change
Expand Up @@ -70,8 +70,11 @@ ELASTICSEARCH_LOGGING_REPLICAS=1
ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}"

# Extra options to set on the Docker command line. This is useful for setting
# --insecure-registry for local registries.
DOCKER_OPTS=""
# --insecure-registry for local registries, or for globally configuring SELinux options.
# TODO Enable selinux when Fedora 21 repositories get an updated docker package
# see https://bugzilla.redhat.com/show_bug.cgi?id=1216151
#EXTRA_DOCKER_OPTS="-b=cbr0 --selinux-enabled --insecure-registry 10.0.0.0/8"
EXTRA_DOCKER_OPTS="-b=cbr0 --insecure-registry 10.0.0.0/8"

# Optional: Install cluster DNS.
ENABLE_CLUSTER_DNS=true
Expand Down
23 changes: 23 additions & 0 deletions cluster/vagrant/provision-master.sh
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,19 @@
# exit on any error
set -e

# Set the host name explicitly
# See: https://github.com/mitchellh/vagrant/issues/2430
hostnamectl set-hostname ${MASTER_NAME}

# Workaround for Vagrant's inability to guess the interface naming sequence
# Tell the system to abandon the new naming scheme and use eth* instead
rm -f /etc/sysconfig/network-scripts/ifcfg-enp0s3

# Prevent the network interface from being managed by NetworkManager (needed for Fedora 21+)
NETWORK_CONF_PATH=/etc/sysconfig/network-scripts/
sed -i 's/^NM_CONTROLLED=no/#NM_CONTROLLED=no/' ${NETWORK_CONF_PATH}ifcfg-eth1
systemctl restart network

function release_not_found() {
echo "It looks as if you don't have a compiled version of Kubernetes. If you" >&2
echo "are running from a clone of the git repo, please run ./build/release.sh." >&2
Expand Down Expand Up @@ -56,6 +69,10 @@ for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
fi
done
echo "127.0.0.1 localhost" >> /etc/hosts # enables cmds like 'kubectl get pods' on master.
echo "$MASTER_IP $MASTER_NAME" >> /etc/hosts

# Configure the openvswitch network
provision-network

# Update salt configuration
mkdir -p /etc/salt/minion.d
Expand All @@ -81,6 +98,7 @@ grains:
roles:
- kubernetes-master
runtime_config: '$(echo "$RUNTIME_CONFIG" | sed -e "s/'/''/g")'
docker_opts: '$(echo "$DOCKER_OPTS" | sed -e "s/'/''/g")'
EOF

mkdir -p /srv/salt-overlay/pillar
Expand Down Expand Up @@ -147,6 +165,11 @@ if [[ ! -f "${known_tokens_file}" ]]; then
mkdir -p /srv/salt-overlay/salt/kubelet
kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth"
(umask u=rw,go= ; echo "{\"BearerToken\": \"$kubelet_token\", \"Insecure\": true }" > $kubelet_auth_file)
kubelet_kubeconfig_file="/srv/salt-overlay/salt/kubelet/kubeconfig"
# Make a kubeconfig file with the token.
(umask 077;
cat > "${kubelet_kubeconfig_file}" <<EOF
)
mkdir -p /srv/salt-overlay/salt/kube-proxy
kube_proxy_kubeconfig_file="/srv/salt-overlay/salt/kube-proxy/kubeconfig"
Expand Down
21 changes: 21 additions & 0 deletions cluster/vagrant/provision-minion.sh
Original file line number Diff line number Diff line change
Expand Up @@ -17,11 +17,25 @@
# exit on any error
set -e

# Set the host name explicitly
# See: https://github.com/mitchellh/vagrant/issues/2430
hostnamectl set-hostname ${MINION_NAME}

# Workaround for Vagrant's inability to guess the interface naming sequence
# Tell the system to abandon the new naming scheme and use eth* instead
rm -f /etc/sysconfig/network-scripts/ifcfg-enp0s3

# Prevent the network interface from being managed by NetworkManager (needed for Fedora 21+)
NETWORK_CONF_PATH=/etc/sysconfig/network-scripts/
sed -i 's/^NM_CONTROLLED=no/#NM_CONTROLLED=no/' ${NETWORK_CONF_PATH}ifcfg-eth1
systemctl restart network

# Setup hosts file to support ping by hostname to master
if [ ! "$(cat /etc/hosts | grep $MASTER_NAME)" ]; then
echo "Adding $MASTER_NAME to hosts file"
echo "$MASTER_IP $MASTER_NAME" >> /etc/hosts
fi
echo "$MINION_IP $MINION_NAME" >> /etc/hosts

# Setup hosts file to support ping by hostname to each minion in the cluster
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
Expand All @@ -33,6 +47,12 @@ for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
fi
done

# Configure network
provision-network

# Placeholder for any other manifests that may be per-node.
mkdir -p /etc/kubernetes/manifests

# Let the minion know who its master is
# Recover the salt-minion if the salt-master network changes
## auth_timeout - how long we want to wait for a time out
Expand Down Expand Up @@ -73,6 +93,7 @@ grains:
- kubernetes-pool
cbr-cidr: '$(echo "$CONTAINER_SUBNET" | sed -e "s/'/''/g")'
hostname_override: '$(echo "$MINION_IP" | sed -e "s/'/''/g")'
docker_opts: '$(echo "$DOCKER_OPTS" | sed -e "s/'/''/g")'
EOF

# we will run provision to update code each time we test, so we do not want to do salt install each time
Expand Down
143 changes: 67 additions & 76 deletions cluster/vagrant/provision-network.sh
Original file line number Diff line number Diff line change
Expand Up @@ -14,85 +14,76 @@
# See the License for the specific language governing permissions and
# limitations under the License.

DOCKER_BRIDGE=kbr0
DOCKER_BRIDGE=cbr0
OVS_SWITCH=obr0
DOCKER_OVS_TUN=tun0
TUNNEL_BASE=gre
NETWORK_CONF_PATH=/etc/sysconfig/network-scripts/
POST_NETWORK_SCRIPT_DIR=/kubernetes-vagrant
POST_NETWORK_SCRIPT=${POST_NETWORK_SCRIPT_DIR}/network_closure.sh

# ensure location of POST_NETWORK_SCRIPT exists
mkdir -p $POST_NETWORK_SCRIPT_DIR

# generate the post-configure script to be called by salt as cmd.wait
cat <<EOF > ${POST_NETWORK_SCRIPT}
#!/bin/bash
set -e
# Only do this operation if the bridge is not defined
ifconfig | grep -q kbr0 || {
CONTAINER_SUBNETS=(${MASTER_CONTAINER_SUBNET} ${MINION_CONTAINER_SUBNETS[@]})
CONTAINER_IPS=(${MASTER_IP} ${MINION_IPS[@]})
# Stop docker before making these updates
systemctl stop docker
# Install openvswitch
yum install -y openvswitch
systemctl enable openvswitch
systemctl start openvswitch
# create new docker bridge
ip link set dev ${DOCKER_BRIDGE} down || true
brctl delbr ${DOCKER_BRIDGE} || true
brctl addbr ${DOCKER_BRIDGE}
ip link set dev ${DOCKER_BRIDGE} up
ifconfig ${DOCKER_BRIDGE} ${CONTAINER_ADDR} netmask ${CONTAINER_NETMASK} up
# add ovs bridge
ovs-vsctl del-br ${OVS_SWITCH} || true
ovs-vsctl add-br ${OVS_SWITCH} -- set Bridge ${OVS_SWITCH} fail-mode=secure
ovs-vsctl set bridge ${OVS_SWITCH} protocols=OpenFlow13
ovs-vsctl del-port ${OVS_SWITCH} ${TUNNEL_BASE}0 || true
ovs-vsctl add-port ${OVS_SWITCH} ${TUNNEL_BASE}0 -- set Interface ${TUNNEL_BASE}0 type=${TUNNEL_BASE} options:remote_ip="flow" options:key="flow" ofport_request=10
# add tun device
ovs-vsctl del-port ${OVS_SWITCH} ${DOCKER_OVS_TUN} || true
ovs-vsctl add-port ${OVS_SWITCH} ${DOCKER_OVS_TUN} -- set Interface ${DOCKER_OVS_TUN} type=internal ofport_request=9
brctl addif ${DOCKER_BRIDGE} ${DOCKER_OVS_TUN}
ip link set ${DOCKER_OVS_TUN} up
# add oflow rules, because we do not want to use stp
ovs-ofctl -O OpenFlow13 del-flows ${OVS_SWITCH}
# now loop through all other minions and create persistent gre tunnels
NODE_INDEX=0
for remote_ip in "\${CONTAINER_IPS[@]}"
do
if [ "\${remote_ip}" == "${NODE_IP}" ]; then
ovs-ofctl -O OpenFlow13 add-flow ${OVS_SWITCH} "table=0,ip,in_port=10,nw_dst=\${CONTAINER_SUBNETS[\${NODE_INDEX}]},actions=output:9"
ovs-ofctl -O OpenFlow13 add-flow ${OVS_SWITCH} "table=0,arp,in_port=10,nw_dst=\${CONTAINER_SUBNETS[\${NODE_INDEX}]},actions=output:9"
else
ovs-ofctl -O OpenFlow13 add-flow ${OVS_SWITCH} "table=0,in_port=9,ip,nw_dst=\${CONTAINER_SUBNETS[\${NODE_INDEX}]},actions=set_field:\${remote_ip}->tun_dst,output:10"
ovs-ofctl -O OpenFlow13 add-flow ${OVS_SWITCH} "table=0,in_port=9,arp,nw_dst=\${CONTAINER_SUBNETS[\${NODE_INDEX}]},actions=set_field:\${remote_ip}->tun_dst,output:10"
fi
((NODE_INDEX++)) || true
done
# add ip route rules such that all pod traffic flows through docker bridge and consequently to the gre tunnels
ip route add ${CONTAINER_SUBNET} dev ${DOCKER_BRIDGE} scope link src ${CONTAINER_ADDR}
# modify the docker service file such that it uses the kube docker bridge and not its own
echo "OPTIONS='-b=kbr0 --selinux-enabled ${DOCKER_OPTS}'" >/etc/sysconfig/docker
systemctl daemon-reload
systemctl start docker
systemctl restart kubelet

# provision network configures the ovs network for pods
function provision-network {
echo "Verifying network configuration"

# Only do this operation if the bridge is not defined
ifconfig | grep -q ${DOCKER_BRIDGE} || {

echo "It looks like the required network bridge has not yet been created"

CONTAINER_SUBNETS=(${MASTER_CONTAINER_SUBNET} ${MINION_CONTAINER_SUBNETS[@]})
CONTAINER_IPS=(${MASTER_IP} ${MINION_IPS[@]})

# Install openvswitch
echo "Installing, enabling prerequisites"
yum install -y openvswitch bridge-utils
systemctl enable openvswitch
systemctl start openvswitch

# create new docker bridge
echo "Create a new docker bridge"
ip link set dev ${DOCKER_BRIDGE} down || true
brctl delbr ${DOCKER_BRIDGE} || true
brctl addbr ${DOCKER_BRIDGE}
ip link set dev ${DOCKER_BRIDGE} up
ifconfig ${DOCKER_BRIDGE} ${CONTAINER_ADDR} netmask ${CONTAINER_NETMASK} up

# add ovs bridge
echo "Add ovs bridge"
ovs-vsctl del-br ${OVS_SWITCH} || true
ovs-vsctl add-br ${OVS_SWITCH} -- set Bridge ${OVS_SWITCH} fail-mode=secure
ovs-vsctl set bridge ${OVS_SWITCH} protocols=OpenFlow13
ovs-vsctl del-port ${OVS_SWITCH} ${TUNNEL_BASE}0 || true
ovs-vsctl add-port ${OVS_SWITCH} ${TUNNEL_BASE}0 -- set Interface ${TUNNEL_BASE}0 type=${TUNNEL_BASE} options:remote_ip="flow" options:key="flow" ofport_request=10

# add tun device
echo "Add tun device"
ovs-vsctl del-port ${OVS_SWITCH} ${DOCKER_OVS_TUN} || true
ovs-vsctl add-port ${OVS_SWITCH} ${DOCKER_OVS_TUN} -- set Interface ${DOCKER_OVS_TUN} type=internal ofport_request=9
brctl addif ${DOCKER_BRIDGE} ${DOCKER_OVS_TUN}
ip link set ${DOCKER_OVS_TUN} up

# add oflow rules, because we do not want to use stp
echo "Add oflow rules"
ovs-ofctl -O OpenFlow13 del-flows ${OVS_SWITCH}

# now loop through all other minions and create persistent gre tunnels
echo "Creating persistent gre tunnels"
NODE_INDEX=0
for remote_ip in "${CONTAINER_IPS[@]}"
do
if [ "\${remote_ip}" == "${NODE_IP}" ]; then
ovs-ofctl -O OpenFlow13 add-flow ${OVS_SWITCH} "table=0,ip,in_port=10,nw_dst=${CONTAINER_SUBNETS[${NODE_INDEX}]},actions=output:9"
ovs-ofctl -O OpenFlow13 add-flow ${OVS_SWITCH} "table=0,arp,in_port=10,nw_dst=${CONTAINER_SUBNETS[${NODE_INDEX}]},actions=output:9"
else
ovs-ofctl -O OpenFlow13 add-flow ${OVS_SWITCH} "table=0,in_port=9,ip,nw_dst=${CONTAINER_SUBNETS[${NODE_INDEX}]},actions=set_field:${remote_ip}->tun_dst,output:10"
ovs-ofctl -O OpenFlow13 add-flow ${OVS_SWITCH} "table=0,in_port=9,arp,nw_dst=${CONTAINER_SUBNETS[${NODE_INDEX}]},actions=set_field:${remote_ip}->tun_dst,output:10"
fi
((NODE_INDEX++)) || true
done
echo "Created persistent gre tunnels"

# add ip route rules such that all pod traffic flows through docker bridge and consequently to the gre tunnels
echo "Add ip route rules such that all pod traffic flows through docker bridge"
ip route add ${CONTAINER_SUBNET} dev ${DOCKER_BRIDGE} scope link src ${CONTAINER_ADDR}
}
echo "Network configuration verified"
}
EOF

chmod +x ${POST_NETWORK_SCRIPT}
Loading

0 comments on commit 2168cee

Please sign in to comment.