Skip to content

Commit

Permalink
keep kubeproxy hostname consistent with kubelet
Browse files Browse the repository at this point in the history
  • Loading branch information
jiangyaoguo committed Nov 28, 2015
1 parent 70d2a02 commit a739fc4
Show file tree
Hide file tree
Showing 8 changed files with 71 additions and 56 deletions.
3 changes: 2 additions & 1 deletion cluster/aws/coreos/node.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,8 @@ coreos:
--master=https://${MASTER_IP} \
--kubeconfig=/var/lib/kube-proxy/kubeconfig \
--v=2 \
--logtostderr=true
--logtostderr=true \
--hostname-override=${HOSTNAME_OVERRIDE}
Restart=always
RestartSec=10
Expand Down
6 changes: 3 additions & 3 deletions cluster/centos/node/scripts/kubelet.sh
Original file line number Diff line number Diff line change
Expand Up @@ -48,9 +48,9 @@ EOF

KUBE_PROXY_OPTS=" \${KUBE_LOGTOSTDERR} \\
\${KUBE_LOG_LEVEL} \\
\${NODE_ADDRESS} \\
\${NODE_PORT} \\
\${NODE_HOSTNAME} \\
\${NODE_ADDRESS} \\
\${NODE_PORT} \\
\${NODE_HOSTNAME} \\
\${KUBELET_API_SERVER} \\
\${KUBE_ALLOW_PRIV} \\
\${KUBELET_ARGS}"
Expand Down
5 changes: 5 additions & 0 deletions cluster/centos/node/scripts/proxy.sh
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@


MASTER_ADDRESS=${1:-"8.8.8.18"}
NODE_ADDRESS=${2:-"8.8.8.20"}

cat <<EOF >/opt/kubernetes/cfg/kube-proxy
# --logtostderr=true: log to standard error instead of files
Expand All @@ -24,12 +25,16 @@ KUBE_LOGTOSTDERR="--logtostderr=true"
# --v=0: log level for V logs
KUBE_LOG_LEVEL="--v=4"
# --hostname-override="": If non-empty, will use this string as identification instead of the actual hostname.
NODE_HOSTNAME="--hostname-override=${NODE_ADDRESS}"
# --master="": The address of the Kubernetes API server (overrides any value in kubeconfig)
KUBE_MASTER="--master=http://${MASTER_ADDRESS}:8080"
EOF

KUBE_PROXY_OPTS=" \${KUBE_LOGTOSTDERR} \\
\${KUBE_LOG_LEVEL} \\
\${NODE_HOSTNAME} \\
\${KUBE_MASTER}"

cat <<EOF >/usr/lib/systemd/system/kube-proxy.service
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -9,4 +9,5 @@ kill timeout 60 # wait 60s between SIGTERM and SIGKILL.

exec /usr/local/bin/proxy \
--master=%(kubeapi_server)s \
--logtostderr=true
--logtostderr=true \
--hostname-override=%(kubelet_bind_addr)s
3 changes: 2 additions & 1 deletion cluster/libvirt-coreos/user_data_minion.yml
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,8 @@ coreos:
[Service]
ExecStart=/opt/kubernetes/bin/kube-proxy \
--master=http://${MASTER_IP}:8080
--master=http://${MASTER_IP}:8080 \
--hostname-override=${NODE_IPS[$i]}
Restart=always
RestartSec=2
Expand Down
1 change: 1 addition & 0 deletions cluster/rackspace/cloud-config/node-cloud-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -179,6 +179,7 @@ coreos:
--bind-address=$private_ipv4 \
--kubeconfig=/var/lib/kube-proxy/kubeconfig \
--logtostderr=true \
--hostname-override=$private_ipv4 \
--master=${FIRST_APISERVER_URL}
Restart=always
RestartSec=5
Expand Down
105 changes: 55 additions & 50 deletions cluster/ubuntu/util.sh
Original file line number Diff line number Diff line change
Expand Up @@ -34,12 +34,12 @@ function test-build-release() {
# From user input set the necessary k8s and etcd configuration information
function setClusterInfo() {
# Initialize NODE_IPS in setClusterInfo function
# NODE_IPS is defined as a global variable, and is concatenated with other nodeIP
# NODE_IPS is defined as a global variable, and is concatenated with other nodeIP
# When setClusterInfo is called many times, this could cause potential problems
# Such as, you will have NODE_IPS=192.168.0.2,192.168.0.3,192.168.0.2,192.168.0.3,
# which is obviously wrong.
NODE_IPS=""

local ii=0
for i in $nodes; do
nodeIP=${i#*@}
Expand Down Expand Up @@ -246,7 +246,8 @@ EOF
function create-kube-proxy-opts() {
cat <<EOF > ~/kube/default/kube-proxy
KUBE_PROXY_OPTS="\
--master=http://${1}:8080 \
--hostname-override=${1} \
--master=http://${2}:8080 \
--logtostderr=true"
EOF

Expand All @@ -267,7 +268,7 @@ EOF
function detect-master() {
source "${KUBE_ROOT}/cluster/ubuntu/${KUBE_CONFIG_FILE:-config-default.sh}"
setClusterInfo
export KUBE_MASTER="${MASTER}"
export KUBE_MASTER="${MASTER}"
export KUBE_MASTER_IP="${MASTER_IP}"
echo "Using master ${MASTER_IP}"
}
Expand Down Expand Up @@ -348,7 +349,7 @@ function kube-up() {
}

function provision-master() {

echo -e "\nDeploying master on machine ${MASTER_IP}"

ssh $SSH_OPTS "$MASTER" "mkdir -p ~/kube/default"
Expand All @@ -371,7 +372,7 @@ function provision-master() {
DNS:kubernetes.default.svc
DNS:kubernetes.default.svc.cluster.local
)

EXTRA_SANS=$(echo "${EXTRA_SANS[@]}" | tr ' ' ,)

# remote login to MASTER and configure k8s master
Expand All @@ -388,10 +389,10 @@ function provision-master() {
create-kube-scheduler-opts
create-flanneld-opts '127.0.0.1'
sudo -E -p '[sudo] password to start master: ' -- /bin/bash -c '
cp ~/kube/default/* /etc/default/
cp ~/kube/init_conf/* /etc/init/
cp ~/kube/default/* /etc/default/
cp ~/kube/init_conf/* /etc/init/
cp ~/kube/init_scripts/* /etc/init.d/
groupadd -f -r kube-cert
${PROXY_SETTING} ~/kube/make-ca-cert.sh \"${MASTER_IP}\" \"${EXTRA_SANS}\"
mkdir -p /opt/bin/
Expand All @@ -402,10 +403,10 @@ function provision-master() {
echo "Deploying master on machine ${MASTER_IP} failed"
exit 1
}
}
}

function provision-node() {

echo -e "\nDeploying node on machine ${1#*@}"

ssh $SSH_OPTS $1 "mkdir -p ~/kube/default"
Expand All @@ -422,21 +423,23 @@ function provision-node() {
# remote login to node and configure k8s node
ssh $SSH_OPTS -t "$1" "
source ~/kube/util.sh
setClusterInfo
create-kubelet-opts \
'${1#*@}' \
'${MASTER_IP}' \
'${DNS_SERVER_IP}' \
'${DNS_DOMAIN}'
create-kube-proxy-opts '${MASTER_IP}'
create-kube-proxy-opts \
'${1#*@}' \
'${MASTER_IP}'
create-flanneld-opts '${MASTER_IP}'
sudo -E -p '[sudo] password to start node: ' -- /bin/bash -c '
cp ~/kube/default/* /etc/default/
cp ~/kube/init_conf/* /etc/init/
cp ~/kube/init_scripts/* /etc/init.d/
mkdir -p /opt/bin/
cp ~/kube/init_scripts/* /etc/init.d/
mkdir -p /opt/bin/
cp ~/kube/minion/* /opt/bin
service flanneld start
~/kube/reconfDocker.sh i
Expand All @@ -447,7 +450,7 @@ function provision-node() {
}

function provision-masterandnode() {

echo -e "\nDeploying master and node on machine ${MASTER_IP}"

ssh $SSH_OPTS $MASTER "mkdir -p ~/kube/default"
Expand All @@ -464,7 +467,7 @@ function provision-masterandnode() {
ubuntu/binaries/master/ \
ubuntu/binaries/minion \
"${MASTER}:~/kube"

EXTRA_SANS=(
IP:${MASTER_IP}
IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1
Expand All @@ -473,13 +476,13 @@ function provision-masterandnode() {
DNS:kubernetes.default.svc
DNS:kubernetes.default.svc.cluster.local
)

EXTRA_SANS=$(echo "${EXTRA_SANS[@]}" | tr ' ' ,)

# remote login to the master/node and configure k8s
ssh $SSH_OPTS -t "$MASTER" "
source ~/kube/util.sh
setClusterInfo
create-etcd-opts '${MASTER_IP}'
create-kube-apiserver-opts \
Expand All @@ -493,17 +496,19 @@ function provision-masterandnode() {
'${MASTER_IP}' \
'${DNS_SERVER_IP}' \
'${DNS_DOMAIN}'
create-kube-proxy-opts '${MASTER_IP}'
create-kube-proxy-opts \
'${MASTER_IP}' \
'${MASTER_IP}'
create-flanneld-opts '127.0.0.1'
sudo -E -p '[sudo] password to start master: ' -- /bin/bash -c '
cp ~/kube/default/* /etc/default/
cp ~/kube/init_conf/* /etc/init/
sudo -E -p '[sudo] password to start master: ' -- /bin/bash -c '
cp ~/kube/default/* /etc/default/
cp ~/kube/init_conf/* /etc/init/
cp ~/kube/init_scripts/* /etc/init.d/
groupadd -f -r kube-cert
${PROXY_SETTING} ~/kube/make-ca-cert.sh \"${MASTER_IP}\" \"${EXTRA_SANS}\"
mkdir -p /opt/bin/
mkdir -p /opt/bin/
cp ~/kube/master/* /opt/bin/
cp ~/kube/minion/* /opt/bin/
Expand All @@ -512,7 +517,7 @@ function provision-masterandnode() {
'" || {
echo "Deploying master and node on machine ${MASTER_IP} failed"
exit 1
}
}
}

# check whether kubelet has torn down all of the pods
Expand All @@ -531,14 +536,14 @@ function check-pods-torn-down() {

# Delete a kubernetes cluster
function kube-down() {

export KUBECTL_PATH="${KUBE_ROOT}/cluster/ubuntu/binaries/kubectl"
source "${KUBE_ROOT}/cluster/ubuntu/${KUBE_CONFIG_FILE:-config-default.sh}"
source "${KUBE_ROOT}/cluster/common.sh"

tear_down_alive_resources
check-pods-torn-down
check-pods-torn-down

local ii=0
for i in ${nodes}; do
if [[ "${roles[${ii}]}" == "ai" || "${roles[${ii}]}" == "a" ]]; then
Expand All @@ -553,7 +558,7 @@ function kube-down() {
/etc/init/etcd.conf \
/etc/init.d/etcd \
/etc/default/etcd
rm -rf /infra*
rm -rf /srv/kubernetes
'
Expand All @@ -562,20 +567,20 @@ function kube-down() {
if [[ "${roles[${ii}]}" == "ai" ]]; then
ssh $SSH_OPTS -t "$i" "sudo rm -rf /var/lib/kubelet"
fi

elif [[ "${roles[${ii}]}" == "i" ]]; then
echo "Cleaning on node ${i#*@}"
ssh $SSH_OPTS -t "$i" "
pgrep flanneld && \
sudo -p '[sudo] password to stop node: ' -- /bin/bash -c '
service flanneld stop
rm -rf /var/lib/kubelet
rm -rf /var/lib/kubelet
'
" || echo "Cleaning on node ${i#*@} failed"
else
echo "unsupported role for ${i}"
fi

ssh $SSH_OPTS -t "$i" "sudo -- /bin/bash -c '
rm -f \
/opt/bin/kube* \
Expand All @@ -586,7 +591,7 @@ function kube-down() {
/etc/init.d/flanneld \
/etc/default/kube* \
/etc/default/flanneld
rm -rf ~/kube
rm -f /run/flannel/subnet.env
'" || echo "cleaning legacy files on ${i#*@} failed"
Expand All @@ -599,16 +604,16 @@ function kube-down() {
function prepare-push() {
# Use local binaries for kube-push
if [[ -z "${KUBE_VERSION}" ]]; then
echo "Use local binaries for kube-push"
echo "Use local binaries for kube-push"
if [[ ! -d "${KUBE_ROOT}/cluster/ubuntu/binaries" ]]; then
echo "No local binaries.Please check"
exit 1
else
else
echo "Please make sure all the required local binaries are prepared ahead"
sleep 3
fi
else
# Run download-release.sh to get the required release
# Run download-release.sh to get the required release
export KUBE_VERSION
"${KUBE_ROOT}/cluster/ubuntu/download-release.sh"
fi
Expand All @@ -617,13 +622,13 @@ function prepare-push() {
# Update a kubernetes master with expected release
function push-master() {
source "${KUBE_ROOT}/cluster/ubuntu/${KUBE_CONFIG_FILE:-config-default.sh}"

if [[ ! -f "${KUBE_ROOT}/cluster/ubuntu/binaries/master/kube-apiserver" ]]; then
echo "There is no required release of kubernetes, please check first"
exit 1
fi
export KUBECTL_PATH="${KUBE_ROOT}/cluster/ubuntu/binaries/kubectl"

setClusterInfo

local ii=0
Expand Down Expand Up @@ -651,8 +656,8 @@ function push-master() {
rm -f /run/flannel/subnet.env
rm -rf ~/kube
'" || echo "Cleaning master ${i#*@} failed"
fi
fi

if [[ "${roles[${ii}]}" == "a" ]]; then
provision-master
elif [[ "${roles[${ii}]}" == "ai" ]]; then
Expand All @@ -679,9 +684,9 @@ function push-node() {
fi

export KUBECTL_PATH="${KUBE_ROOT}/cluster/ubuntu/binaries/kubectl"

setClusterInfo

local node_ip=${1}
local ii=0
local existing=false
Expand Down Expand Up @@ -726,22 +731,22 @@ function push-node() {
echo "node ${node_ip} does not exist"
else
verify-cluster
fi
fi

}

# Update a kubernetes cluster with expected source
function kube-push() {
function kube-push() {
prepare-push
source "${KUBE_ROOT}/cluster/ubuntu/${KUBE_CONFIG_FILE:-config-default.sh}"

if [[ ! -f "${KUBE_ROOT}/cluster/ubuntu/binaries/master/kube-apiserver" ]]; then
echo "There is no required release of kubernetes, please check first"
exit 1
fi

export KUBECTL_PATH="${KUBE_ROOT}/cluster/ubuntu/binaries/kubectl"
#stop all the kube's process & etcd
#stop all the kube's process & etcd
local ii=0
for i in ${nodes}; do
if [[ "${roles[${ii}]}" == "ai" || "${roles[${ii}]}" == "a" ]]; then
Expand Down
Loading

0 comments on commit a739fc4

Please sign in to comment.