
Commit

Merge pull request kubernetes#17601 from eosrei/shell-var-MINION-to-NODE
Auto commit by PR queue bot
k8s-merge-robot committed Nov 26, 2015
2 parents 492782c + a506030 commit 0c4f302
Showing 60 changed files with 403 additions and 403 deletions.
6 changes: 3 additions & 3 deletions Vagrantfile
@@ -18,11 +18,11 @@ END
end

# The number of minions to provision
-$num_minion = (ENV['NUM_MINIONS'] || 1).to_i
+$num_minion = (ENV['NUM_NODES'] || 1).to_i

# ip configuration
$master_ip = ENV['MASTER_IP']
-$minion_ip_base = ENV['MINION_IP_BASE'] || ""
+$minion_ip_base = ENV['NODE_IP_BASE'] || ""
$minion_ips = $num_minion.times.collect { |n| $minion_ip_base + "#{n+3}" }

# Determine the OS platform to use
@@ -105,7 +105,7 @@ end
# When doing Salt provisioning, we copy approximately 200MB of content in /tmp before anything else happens.
# This causes problems if anything else was in /tmp or the other directories that are bound to tmpfs device (i.e /run, etc.)
$vm_master_mem = (ENV['KUBERNETES_MASTER_MEMORY'] || ENV['KUBERNETES_MEMORY'] || 1280).to_i
-$vm_minion_mem = (ENV['KUBERNETES_MINION_MEMORY'] || ENV['KUBERNETES_MEMORY'] || 1024).to_i
+$vm_minion_mem = (ENV['KUBERNETES_NODE_MEMORY'] || ENV['KUBERNETES_MEMORY'] || 1024).to_i

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
def setvmboxandurl(config, provider)
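For reference, a minimal usage sketch (not part of this commit) of how the renamed Vagrant variables are typically supplied; `KUBERNETES_PROVIDER=vagrant` and `cluster/kube-up.sh` are assumed entry points and may differ in your checkout:

```
# Illustrative only: bring up a Vagrant cluster with the renamed variables.
export KUBERNETES_PROVIDER=vagrant
export NUM_NODES=2                  # replaces NUM_MINIONS
export KUBERNETES_NODE_MEMORY=2048  # replaces KUBERNETES_MINION_MEMORY (MB)
./cluster/kube-up.sh
```
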
36 changes: 18 additions & 18 deletions cluster/aws/config-default.sh
@@ -16,26 +16,26 @@

ZONE=${KUBE_AWS_ZONE:-us-west-2a}
MASTER_SIZE=${MASTER_SIZE:-}
-MINION_SIZE=${MINION_SIZE:-}
-NUM_MINIONS=${NUM_MINIONS:-4}
+NODE_SIZE=${NODE_SIZE:-}
+NUM_NODES=${NUM_NODES:-4}

# Dynamically set node sizes so that Heapster has enough space to run
-if [[ -z ${MINION_SIZE} ]]; then
-if (( ${NUM_MINIONS} < 50 )); then
-MINION_SIZE="t2.micro"
-elif (( ${NUM_MINIONS} < 150 )); then
-MINION_SIZE="t2.small"
+if [[ -z ${NODE_SIZE} ]]; then
+if (( ${NUM_NODES} < 50 )); then
+NODE_SIZE="t2.micro"
+elif (( ${NUM_NODES} < 150 )); then
+NODE_SIZE="t2.small"
else
MINION_SIZE="t2.medium"
NODE_SIZE="t2.medium"
fi
fi

# Dynamically set the master size by the number of nodes, these are guesses
# TODO: gather some data
if [[ -z ${MASTER_SIZE} ]]; then
-if (( ${NUM_MINIONS} < 50 )); then
+if (( ${NUM_NODES} < 50 )); then
MASTER_SIZE="t2.micro"
-elif (( ${NUM_MINIONS} < 150 )); then
+elif (( ${NUM_NODES} < 150 )); then
MASTER_SIZE="t2.small"
else
MASTER_SIZE="t2.medium"
@@ -56,7 +56,7 @@ INSTANCE_PREFIX="${KUBE_AWS_INSTANCE_PREFIX:-kubernetes}"
CLUSTER_ID=${INSTANCE_PREFIX}
AWS_SSH_KEY=${AWS_SSH_KEY:-$HOME/.ssh/kube_aws_rsa}
IAM_PROFILE_MASTER="kubernetes-master"
-IAM_PROFILE_MINION="kubernetes-minion"
+IAM_PROFILE_NODE="kubernetes-minion"

LOG="/dev/null"

@@ -66,13 +66,13 @@ MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20}
MASTER_ROOT_DISK_TYPE="${MASTER_ROOT_DISK_TYPE:-gp2}"
MASTER_ROOT_DISK_SIZE=${MASTER_ROOT_DISK_SIZE:-8}
# The minions root EBS volume size (used to house Docker images)
-MINION_ROOT_DISK_TYPE="${MINION_ROOT_DISK_TYPE:-gp2}"
-MINION_ROOT_DISK_SIZE=${MINION_ROOT_DISK_SIZE:-32}
+NODE_ROOT_DISK_TYPE="${NODE_ROOT_DISK_TYPE:-gp2}"
+NODE_ROOT_DISK_SIZE=${NODE_ROOT_DISK_SIZE:-32}

MASTER_NAME="${INSTANCE_PREFIX}-master"
MASTER_TAG="${INSTANCE_PREFIX}-master"
-MINION_TAG="${INSTANCE_PREFIX}-minion"
-MINION_SCOPES=""
+NODE_TAG="${INSTANCE_PREFIX}-minion"
+NODE_SCOPES=""
POLL_SLEEP_INTERVAL=3
SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET
CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.244.0.0/16}"
@@ -121,7 +121,7 @@ ENABLE_NODE_AUTOSCALER="${KUBE_ENABLE_NODE_AUTOSCALER:-false}"
if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then
# TODO: actually configure ASG or similar
AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-1}"
-AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_MINIONS}}"
+AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_NODES}}"
TARGET_NODE_UTILIZATION="${KUBE_TARGET_NODE_UTILIZATION:-0.7}"
fi

@@ -130,11 +130,11 @@ ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAcco

# Optional: Enable/disable public IP assignment for minions.
# Important Note: disable only if you have setup a NAT instance for internet access and configured appropriate routes!
-ENABLE_MINION_PUBLIC_IP=${KUBE_ENABLE_MINION_PUBLIC_IP:-true}
+ENABLE_NODE_PUBLIC_IP=${KUBE_ENABLE_NODE_PUBLIC_IP:-true}

# OS options for minions
KUBE_OS_DISTRIBUTION="${KUBE_OS_DISTRIBUTION:-vivid}"
-KUBE_MINION_IMAGE="${KUBE_MINION_IMAGE:-}"
+KUBE_NODE_IMAGE="${KUBE_NODE_IMAGE:-}"
COREOS_CHANNEL="${COREOS_CHANNEL:-alpha}"
CONTAINER_RUNTIME="${KUBE_CONTAINER_RUNTIME:-docker}"
RKT_VERSION="${KUBE_RKT_VERSION:-0.5.5}"
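Because every default above uses the `${VAR:-default}` shell pattern, the renamed knobs can be overridden from the environment. A hedged sketch (not part of this commit; `KUBERNETES_PROVIDER=aws` and `cluster/kube-up.sh` are assumed entry points):

```
# Illustrative only: override the renamed AWS defaults before kube-up.
export KUBERNETES_PROVIDER=aws
export NUM_NODES=10            # was NUM_MINIONS (default 4)
export NODE_SIZE=r3.large      # was MINION_SIZE (default: auto-sized)
export NODE_ROOT_DISK_SIZE=64  # was MINION_ROOT_DISK_SIZE (default 32, in GB)
./cluster/kube-up.sh
```
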
36 changes: 18 additions & 18 deletions cluster/aws/config-test.sh
@@ -17,26 +17,26 @@
ZONE=${KUBE_AWS_ZONE:-us-west-2a}

MASTER_SIZE=${MASTER_SIZE:-}
-MINION_SIZE=${MINION_SIZE:-}
-NUM_MINIONS=${NUM_MINIONS:-2}
+NODE_SIZE=${NODE_SIZE:-}
+NUM_NODES=${NUM_NODES:-2}

# Dynamically set node sizes so that Heapster has enough space to run
-if [[ -z ${MINION_SIZE} ]]; then
-if (( ${NUM_MINIONS} < 50 )); then
-MINION_SIZE="t2.micro"
-elif (( ${NUM_MINIONS} < 150 )); then
-MINION_SIZE="t2.small"
+if [[ -z ${NODE_SIZE} ]]; then
+if (( ${NUM_NODES} < 50 )); then
+NODE_SIZE="t2.micro"
+elif (( ${NUM_NODES} < 150 )); then
+NODE_SIZE="t2.small"
else
MINION_SIZE="t2.medium"
NODE_SIZE="t2.medium"
fi
fi

# Dynamically set the master size by the number of nodes, these are guesses
# TODO: gather some data
if [[ -z ${MASTER_SIZE} ]]; then
-if (( ${NUM_MINIONS} < 50 )); then
+if (( ${NUM_NODES} < 50 )); then
MASTER_SIZE="t2.micro"
-elif (( ${NUM_MINIONS} < 150 )); then
+elif (( ${NUM_NODES} < 150 )); then
MASTER_SIZE="t2.small"
else
MASTER_SIZE="t2.medium"
@@ -54,7 +54,7 @@ INSTANCE_PREFIX="${KUBE_AWS_INSTANCE_PREFIX:-e2e-test-${USER}}"
CLUSTER_ID=${INSTANCE_PREFIX}
AWS_SSH_KEY=${AWS_SSH_KEY:-$HOME/.ssh/kube_aws_rsa}
IAM_PROFILE_MASTER="kubernetes-master"
-IAM_PROFILE_MINION="kubernetes-minion"
+IAM_PROFILE_NODE="kubernetes-minion"

LOG="/dev/null"

@@ -64,13 +64,13 @@ MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20}
MASTER_ROOT_DISK_TYPE="${MASTER_ROOT_DISK_TYPE:-gp2}"
MASTER_ROOT_DISK_SIZE=${MASTER_ROOT_DISK_SIZE:-8}
# The minions root EBS volume size (used to house Docker images)
-MINION_ROOT_DISK_TYPE="${MINION_ROOT_DISK_TYPE:-gp2}"
-MINION_ROOT_DISK_SIZE=${MINION_ROOT_DISK_SIZE:-32}
+NODE_ROOT_DISK_TYPE="${NODE_ROOT_DISK_TYPE:-gp2}"
+NODE_ROOT_DISK_SIZE=${NODE_ROOT_DISK_SIZE:-32}

MASTER_NAME="${INSTANCE_PREFIX}-master"
MASTER_TAG="${INSTANCE_PREFIX}-master"
-MINION_TAG="${INSTANCE_PREFIX}-minion"
-MINION_SCOPES=""
+NODE_TAG="${INSTANCE_PREFIX}-minion"
+NODE_SCOPES=""
POLL_SLEEP_INTERVAL=3
SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET
CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.245.0.0/16}"
@@ -117,7 +117,7 @@ ENABLE_NODE_AUTOSCALER="${KUBE_ENABLE_NODE_AUTOSCALER:-false}"
if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then
# TODO: actually configure ASG or similar
AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-1}"
-AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_MINIONS}}"
+AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_NODES}}"
TARGET_NODE_UTILIZATION="${KUBE_TARGET_NODE_UTILIZATION:-0.7}"
fi

@@ -126,11 +126,11 @@ ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAcco

# Optional: Enable/disable public IP assignment for minions.
# Important Note: disable only if you have setup a NAT instance for internet access and configured appropriate routes!
-ENABLE_MINION_PUBLIC_IP=${KUBE_ENABLE_MINION_PUBLIC_IP:-true}
+ENABLE_NODE_PUBLIC_IP=${KUBE_ENABLE_NODE_PUBLIC_IP:-true}

# OS options for minions
KUBE_OS_DISTRIBUTION="${KUBE_OS_DISTRIBUTION:-vivid}"
-KUBE_MINION_IMAGE="${KUBE_MINION_IMAGE:-}"
+KUBE_NODE_IMAGE="${KUBE_NODE_IMAGE:-}"
COREOS_CHANNEL="${COREOS_CHANNEL:-alpha}"
CONTAINER_RUNTIME="${KUBE_CONTAINER_RUNTIME:-docker}"
RKT_VERSION="${KUBE_RKT_VERSION:-0.5.5}"
8 changes: 4 additions & 4 deletions cluster/aws/coreos/util.sh
@@ -19,11 +19,11 @@
SSH_USER=core

function detect-minion-image (){
if [[ -z "${KUBE_MINION_IMAGE-}" ]]; then
KUBE_MINION_IMAGE=$(curl -s -L http://${COREOS_CHANNEL}.release.core-os.net/amd64-usr/current/coreos_production_ami_all.json | python -c "import json,sys;obj=json.load(sys.stdin);print filter(lambda t: t['name']=='${AWS_REGION}', obj['amis'])[0]['hvm']")
if [[ -z "${KUBE_NODE_IMAGE-}" ]]; then
KUBE_NODE_IMAGE=$(curl -s -L http://${COREOS_CHANNEL}.release.core-os.net/amd64-usr/current/coreos_production_ami_all.json | python -c "import json,sys;obj=json.load(sys.stdin);print filter(lambda t: t['name']=='${AWS_REGION}', obj['amis'])[0]['hvm']")
fi
if [[ -z "${KUBE_MINION_IMAGE-}" ]]; then
echo "unable to determine KUBE_MINION_IMAGE"
if [[ -z "${KUBE_NODE_IMAGE-}" ]]; then
echo "unable to determine KUBE_NODE_IMAGE"
exit 2
fi
}
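As an aside, the AMI lookup above relies on a Python 2 one-liner (`print filter(...)`). An equivalent extraction with `jq` is sketched below purely for illustration; jq is not a dependency of these scripts:

```
# Illustrative only: pick the HVM AMI for the current region with jq.
curl -s -L "http://${COREOS_CHANNEL}.release.core-os.net/amd64-usr/current/coreos_production_ami_all.json" \
  | jq -r --arg region "${AWS_REGION}" '.amis[] | select(.name == $region) | .hvm'
```
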
8 changes: 4 additions & 4 deletions cluster/aws/options.md
@@ -27,26 +27,26 @@ It is not a bad idea to set AWS_S3_BUCKET to something more human friendly.

AWS_S3_REGION is useful for people that want to control their data location, because of regulatory restrictions for example.
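A one-line illustration (not part of this file's diff; the region value is just an example):

```
export AWS_S3_REGION=us-west-2
```
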

-**MASTER_SIZE**, **MINION_SIZE**
+**MASTER_SIZE**, **NODE_SIZE**

The instance type to use for creating the master/minion. Defaults to auto-sizing based on the number of nodes (see below).

For production usage, we recommend bigger instances, for example:

```
export MASTER_SIZE=c4.large
-export MINION_SIZE=r3.large
+export NODE_SIZE=r3.large
```

-If you don't specify master and minion sizes, the scripts will attempt to guess the correct size of the master and worker nodes based on `${NUM_MINIONS}`.
+If you don't specify master and minion sizes, the scripts will attempt to guess the correct size of the master and worker nodes based on `${NUM_NODES}`.
In particular, for clusters of fewer than 50 nodes it will use a `t2.micro`, for clusters of between 50 and 150 nodes it will use a `t2.small`, and for clusters of 150 or more nodes it will use a `t2.medium`.
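To make those thresholds concrete, an assumed example (the selection logic itself lives in `cluster/aws/config-default.sh`, shown earlier in this diff; the `kube-up` invocation is assumed):

```
# Illustrative only: with no explicit sizes set, 120 nodes selects
# t2.small for both nodes and master; 200 nodes would select t2.medium.
export KUBERNETES_PROVIDER=aws
export NUM_NODES=120
./cluster/kube-up.sh
```
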

Please note: `kube-up` utilizes ephemeral storage available on instances for docker storage. EBS-only instance types do not
support ephemeral storage and will default to docker storage on the root disk which is usually only 8GB.
EBS-only instance types include `t2`, `c4`, and `m4`.

-**KUBE_ENABLE_MINION_PUBLIC_IP**
+**KUBE_ENABLE_NODE_PUBLIC_IP**

Should a public IP be automatically assigned to the minions? "true" or "false"
Defaults to: "true"
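A sketch of opting out (assumes a NAT instance and routes are already in place, as the warning in `cluster/aws/config-default.sh` requires):

```
# Illustrative only: disable public IPs on nodes; outbound internet
# access must then flow through your own NAT setup.
export KUBE_ENABLE_NODE_PUBLIC_IP=false
```
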
2 changes: 1 addition & 1 deletion cluster/aws/templates/create-dynamic-salt-files.sh
@@ -40,7 +40,7 @@ network_provider: '$(echo "$NETWORK_PROVIDER")'
opencontrail_tag: '$(echo "$OPENCONTRAIL_TAG")'
opencontrail_kubernetes_tag: '$(echo "$OPENCONTRAIL_KUBERNETES_TAG")'
opencontrail_public_subnet: '$(echo "$OPENCONTRAIL_PUBLIC_SUBNET")'
num_nodes: $(echo "${NUM_MINIONS}")
num_nodes: $(echo "${NUM_NODES}")
e2e_storage_test_environment: '$(echo "$E2E_STORAGE_TEST_ENVIRONMENT" | sed -e "s/'/''/g")'
EOF

4 changes: 2 additions & 2 deletions cluster/aws/trusty/common.sh
@@ -18,9 +18,9 @@
# A library of common helper functions for Ubuntus & Debians.

function detect-minion-image() {
if [[ -z "${KUBE_MINION_IMAGE=-}" ]]; then
if [[ -z "${KUBE_NODE_IMAGE=-}" ]]; then
detect-image
-KUBE_MINION_IMAGE=$AWS_IMAGE
+KUBE_NODE_IMAGE=$AWS_IMAGE
fi
}
