Commit 9aa0efa: Turn flannel off by default

bprashanth authored and gmarek committed Nov 25, 2015
1 parent ad2d3d4
Showing 14 changed files with 83 additions and 49 deletions.
2 changes: 1 addition & 1 deletion cluster/gce/config-default.sh
@@ -113,7 +113,7 @@ ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota
KUBE_UP_AUTOMATIC_CLEANUP=${KUBE_UP_AUTOMATIC_CLEANUP:-false}

# OpenContrail networking plugin specific settings
-NETWORK_PROVIDER="${NETWORK_PROVIDER:-none}" # opencontrail
+NETWORK_PROVIDER="${NETWORK_PROVIDER:-none}" # opencontrail, flannel
OPENCONTRAIL_TAG="${OPENCONTRAIL_TAG:-R2.20}"
OPENCONTRAIL_KUBERNETES_TAG="${OPENCONTRAIL_KUBERNETES_TAG:-master}"
OPENCONTRAIL_PUBLIC_SUBNET="${OPENCONTRAIL_PUBLIC_SUBNET:-10.1.0.0/16}"
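With flannel now off by default, opting back in is a matter of setting the provider before bringing up a cluster. A minimal sketch, assuming the standard GCE kube-up entrypoint:

```sh
# Re-enable the flannel overlay, which this commit turns off by default.
NETWORK_PROVIDER=flannel KUBERNETES_PROVIDER=gce cluster/kube-up.sh
```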
4 changes: 1 addition & 3 deletions cluster/gce/config-test.sh
@@ -125,12 +125,10 @@ KUBE_UP_AUTOMATIC_CLEANUP=${KUBE_UP_AUTOMATIC_CLEANUP:-false}
TEST_CLUSTER="${TEST_CLUSTER:-true}"

# OpenContrail networking plugin specific settings
-NETWORK_PROVIDER="${NETWORK_PROVIDER:-none}" # opencontrail
+NETWORK_PROVIDER="${NETWORK_PROVIDER:-none}" # opencontrail,flannel
OPENCONTRAIL_TAG="${OPENCONTRAIL_TAG:-R2.20}"
OPENCONTRAIL_KUBERNETES_TAG="${OPENCONTRAIL_KUBERNETES_TAG:-master}"
OPENCONTRAIL_PUBLIC_SUBNET="${OPENCONTRAIL_PUBLIC_SUBNET:-10.1.0.0/16}"

# Optional: if set to true, kube-up will configure the cluster to run e2e tests.
E2E_STORAGE_TEST_ENVIRONMENT=${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false}
-# Overlay network settings
-OVERLAY_NETWORK=${OVERLAY_NETWORK:-true}
12 changes: 6 additions & 6 deletions cluster/saltbase/salt/flannel-server/flannel-server.manifest
@@ -2,17 +2,17 @@
"kind": "Pod",
"apiVersion": "v1",
"metadata": {
-"name": "flannel-helper",
+"name": "flannel-server",
"namespace": "kube-system",
"labels": {
-"app": "flannel-helper",
+"app": "flannel-server",
"version": "v0.1"
}
},
"spec": {
"volumes": [
{
-"name": "varlogflannel",
+"name": "varlog",
"hostPath": {
"path": "/var/log"
}
@@ -30,8 +30,8 @@
],
"containers": [
{
-"name": "flannel-helper",
-"image": "bprashanth/flannel-helper:0.1",
+"name": "flannel-server-helper",
+"image": "gcr.io/google_containers/flannel-server-helper:0.1",
"args": [
"--network-config=/etc/kubernetes/network.json",
"--etcd-prefix=/kubernetes.io/network",
@@ -66,7 +66,7 @@
},
"volumeMounts": [
{
-"name": "varlogflannel",
+"name": "varlog",
"mountPath": "/var/log"
}
]
4 changes: 2 additions & 2 deletions cluster/saltbase/salt/flannel-server/network.json
@@ -1,6 +1,6 @@
{
-"Network": "192.168.0.0/16",
-"SubnetLen": 26,
+"Network": "172.16.0.0/12",
+"SubnetLen": 24,
"Backend": {
"Type": "vxlan",
"VNI": 1
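For reference, the complete flannel server network config after this hunk (reconstructed from the diff; the VXLAN backend stanza is unchanged):

```json
{
  "Network": "172.16.0.0/12",
  "SubnetLen": 24,
  "Backend": {
    "Type": "vxlan",
    "VNI": 1
  }
}
```

Widening the network to a /12 while growing each node subnet from /26 to /24 raises the ceiling from 1024 node subnets with 62 usable pod IPs each to 4096 subnets with 254 each.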
2 changes: 2 additions & 0 deletions cluster/saltbase/salt/flannel/init.sls
@@ -1,3 +1,5 @@
+# TODO: Run flannel daemon in a static pod once we've moved the overlay network
+# setup into a network plugin.
flannel-tar:
archive:
- extracted
@@ -10,7 +10,11 @@
{% if pillar['cluster_cidr'] is defined and pillar['cluster_cidr'] != "" -%}
{% set cluster_cidr = "--cluster-cidr=" + pillar['cluster_cidr'] -%}
{% endif -%}
-{% if pillar['allocate_node_cidrs'] is defined -%}
+# When we're using flannel it is responsible for cidr allocation.
+# This is expected to be a short-term compromise.
+{% if pillar.get('network_provider', '').lower() == 'flannel' %}
+{% set allocate_node_cidrs = "--allocate-node-cidrs=false" -%}
+{% elif pillar['allocate_node_cidrs'] is defined -%}
{% set allocate_node_cidrs = "--allocate-node-cidrs=" + pillar['allocate_node_cidrs'] -%}
{% endif -%}
{% if pillar['terminated_pod_gc_threshold'] is defined -%}
@@ -39,7 +43,7 @@
{% set root_ca_file = "--root-ca-file=/srv/kubernetes/ca.crt" -%}
{% endif -%}

-{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " --allocate-node-cidrs=false" + " " + terminated_pod_gc + " " + cloud_provider + " " + cloud_config + service_account_key + pillar['log_level'] + " " + root_ca_file -%}
+{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + terminated_pod_gc + " " + cloud_provider + " " + cloud_config + service_account_key + pillar['log_level'] + " " + root_ca_file -%}


# test_args has to be kept at the end, so they'll overwrite any prior configuration
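To make the template change concrete, here is a sketch of the argument string it renders for the controller manager when network_provider is flannel (cluster name, CIDR, and log level are illustrative; optional flags elided):

```sh
# Hypothetical rendered params with network_provider=flannel:
kube-controller-manager --master=127.0.0.1:8080 --cluster-name=e2e-test \
  --cluster-cidr=172.16.0.0/12 --allocate-node-cidrs=false --v=2
```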
7 changes: 6 additions & 1 deletion cluster/saltbase/salt/kubelet/default
@@ -85,6 +85,11 @@
{% set configure_cbr0 = "--configure-cbr0=" + pillar['allocate_node_cidrs'] -%}
{% endif -%}

+{% set experimental_flannel_overlay = "" -%}
+{% if pillar.get('network_provider', '').lower() == 'flannel' %}
+{% set experimental_flannel_overlay = "--experimental-flannel-overlay=true" %}
+{% endif -%}
+
# Run containers under the root cgroup and create a system container.
{% set system_container = "" -%}
{% set cgroup_root = "" -%}
@@ -117,4 +122,4 @@
{% endif -%}

# test_args has to be kept at the end, so they'll overwrite any prior configuration
-DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{config}} {{manifest_url}} --allow-privileged={{pillar['allow_privileged']}} {{pillar['log_level']}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{configure_cbr0}} {{cgroup_root}} {{system_container}} {{pod_cidr}} {{ master_kubelet_args }} {{cpu_cfs_quota}} {{network_plugin}} {{test_args}}"
+DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{config}} {{manifest_url}} --allow-privileged={{pillar['allow_privileged']}} {{pillar['log_level']}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{configure_cbr0}} {{cgroup_root}} {{system_container}} {{pod_cidr}} {{ master_kubelet_args }} {{cpu_cfs_quota}} {{network_plugin}} {{experimental_flannel_overlay}} {{test_args}}"
4 changes: 4 additions & 0 deletions cluster/saltbase/salt/top.sls
@@ -13,7 +13,9 @@ base:
'roles:kubernetes-pool':
- match: grain
- docker
+{% if pillar.get('network_provider', '').lower() == 'flannel' %}
- flannel
+{% endif %}
- helpers
- cadvisor
- kube-client-tools
@@ -41,8 +43,10 @@
- match: grain
- generate-cert
- etcd
+{% if pillar.get('network_provider', '').lower() == 'flannel' %}
- flannel-server
- flannel
+{% endif %}
- kube-apiserver
- kube-controller-manager
- kube-scheduler
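The gating above keys off the network_provider pillar value; a sketch of the pillar data that pulls the flannel states in (the exact pillar file is provisioning-specific, and kube-up derives the value from NETWORK_PROVIDER):

```yaml
# Hypothetical pillar entry enabling the flannel states above.
network_provider: flannel
```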
5 changes: 3 additions & 2 deletions cmd/kube-controller-manager/app/controllermanager.go
@@ -292,15 +292,16 @@ func (s *CMServer) Run(_ []string) error {
}

if s.AllocateNodeCIDRs {
-// TODO: Pipe this as a command line flag that corresponds to overlay==true
-if cloud == nil || true {
+if cloud == nil {
glog.Warning("allocate-node-cidrs is set, but no cloud provider specified. Will not manage routes.")
} else if routes, ok := cloud.Routes(); !ok {
glog.Warning("allocate-node-cidrs is set, but cloud provider does not support routes. Will not manage routes.")
} else {
routeController := routecontroller.New(routes, kubeClient, s.ClusterName, &s.ClusterCIDR)
routeController.Run(s.NodeSyncPeriod)
}
+} else {
+glog.Infof("allocate-node-cidrs set to %v, node controller not creating routes", s.AllocateNodeCIDRs)
}

resourcequotacontroller.NewResourceQuotaController(kubeClient).Run(s.ResourceQuotaSyncPeriod)
23 changes: 9 additions & 14 deletions cmd/kubelet/app/server.go
@@ -68,9 +68,8 @@ import (
)

const (
-defaultRootDir = "/var/lib/kubelet"
-networkConfig = "/var/run/flannel/network.json"
-useDefaultOverlay = true
+defaultRootDir = "/var/lib/kubelet"
+experimentalFlannelOverlay = false
)

// KubeletServer encapsulates all of the parameters necessary for starting up
@@ -158,8 +157,8 @@ type KubeletServer struct {
KubeAPIBurst int

// Pull images one at a time.
-SerializeImagePulls bool
-UseDefaultOverlay bool
+SerializeImagePulls bool
+ExperimentalFlannelOverlay bool
}

// bootstrapping interface for kubelet, targets the initialization protocol
@@ -232,8 +231,7 @@ func NewKubeletServer() *KubeletServer {
ReconcileCIDR: true,
KubeAPIQPS: 5.0,
KubeAPIBurst: 10,
-// Flannel parameters
-UseDefaultOverlay: useDefaultOverlay,
+ExperimentalFlannelOverlay: experimentalFlannelOverlay,
}
}

@@ -348,9 +346,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
fs.Float32Var(&s.KubeAPIQPS, "kube-api-qps", s.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver")
fs.IntVar(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver")
fs.BoolVar(&s.SerializeImagePulls, "serialize-image-pulls", s.SerializeImagePulls, "Pull images one at a time. We recommend *not* changing the default value on nodes that run docker daemon with version < 1.9 or an Aufs storage backend. Issue #10959 has more details. [default=true]")

-// Flannel config parameters
-fs.BoolVar(&s.UseDefaultOverlay, "use-default-overlay", s.UseDefaultOverlay, "Experimental support for starting the kubelet with the default overlay network (flannel). Assumes flanneld is already running in client mode. [default=false]")
+fs.BoolVar(&s.ExperimentalFlannelOverlay, "experimental-flannel-overlay", s.ExperimentalFlannelOverlay, "Experimental support for starting the kubelet with the default overlay network (flannel). Assumes flanneld is already running in client mode. [default=false]")
}

// UnsecuredKubeletConfig returns a KubeletConfig suitable for being run, or an error if the server setup
@@ -489,7 +485,7 @@ func (s *KubeletServer) UnsecuredKubeletConfig() (*KubeletConfig, error) {
Writer: writer,
VolumePlugins: ProbeVolumePlugins(),

-UseDefaultOverlay: s.UseDefaultOverlay,
+ExperimentalFlannelOverlay: s.ExperimentalFlannelOverlay,
}, nil
}

@@ -962,7 +958,7 @@ type KubeletConfig struct {
Writer io.Writer
VolumePlugins []volume.VolumePlugin

-UseDefaultOverlay bool
+ExperimentalFlannelOverlay bool
}

func CreateAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.PodConfig, err error) {
@@ -1045,8 +1041,7 @@ func CreateAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.PodConfig, err error) {
kc.OOMAdjuster,
kc.SerializeImagePulls,
kc.ContainerManager,
-// Flannel parameters
-kc.UseDefaultOverlay,
+kc.ExperimentalFlannelOverlay,
)

if err != nil {
4 changes: 2 additions & 2 deletions docs/admin/kubelet.md
@@ -85,6 +85,7 @@ kubelet
--enable-server[=true]: Enable the Kubelet's server
--event-burst=10: Maximum size of a bursty event records, temporarily allows event records to burst to this number, while still not exceeding event-qps. Only used if --event-qps > 0
--event-qps=5: If > 0, limit event creations per second to this value. If 0, unlimited.
+--experimental-flannel-overlay[=false]: Experimental support for starting the kubelet with the default overlay network (flannel). Assumes flanneld is already running in client mode. [default=false]
--file-check-frequency=20s: Duration between checking config files for new data
--google-json-key="": The Google Cloud Platform Service Account JSON Key to use for authentication.
--healthz-bind-address=127.0.0.1: The IP address for the healthz server to serve on, defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces)
@@ -137,10 +138,9 @@ kubelet
--system-container="": Optional resource-only container in which to place all non-kernel processes that are not already in a container. Empty for no container. Rolling back the flag requires a reboot. (Default: "").
--tls-cert-file="": File containing x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). If --tls-cert-file and --tls-private-key-file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory passed to --cert-dir.
--tls-private-key-file="": File containing x509 private key matching --tls-cert-file.
---use-default-overlay[=true]: Experimental support for starting the kubelet with the default overlay network (flannel). Assumes flanneld is already running in client mode. [default=false]
```

-###### Auto generated by spf13/cobra on 23-Nov-2015
+###### Auto generated by spf13/cobra on 24-Nov-2015


<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
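A sketch of a kubelet invocation exercising the renamed flag (paths and companion flags are illustrative, mirroring the salt template earlier in this commit; normally DAEMON_ARGS assembles them):

```sh
kubelet --experimental-flannel-overlay=true --configure-cbr0=true \
  --config=/etc/kubernetes/manifests --v=2
```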
2 changes: 1 addition & 1 deletion hack/verify-flags/known-flags.txt
@@ -327,4 +327,4 @@ watch-only
whitelist-override-label
windows-line-endings
www-prefix
-use-default-overlay
+experimental-flannel-overlay
11 changes: 7 additions & 4 deletions pkg/kubelet/flannel_helper.go
@@ -32,6 +32,9 @@ import (

// TODO: Move all this to a network plugin.
const (
+// TODO: The location of default docker options is distro specific, so this
+// probably won't work on anything other than debian/ubuntu. This is a
+// short-term compromise till we've moved overlay setup into a plugin.
dockerOptsFile = "/etc/default/docker"
flannelSubnetKey = "FLANNEL_SUBNET"
flannelNetworkKey = "FLANNEL_NETWORK"
@@ -78,7 +81,7 @@ func (f *FlannelHelper) Handshake() (podCIDR string, err error) {
if _, err = os.Stat(f.subnetFile); err != nil {
return "", fmt.Errorf("Waiting for subnet file %v", f.subnetFile)
}
-glog.Infof("(kubelet)Found flannel subnet file %v", f.subnetFile)
+glog.Infof("Found flannel subnet file %v", f.subnetFile)

config, err := parseKVConfig(f.subnetFile)
if err != nil {
@@ -115,7 +118,7 @@ func writeDockerOptsFromFlannelConfig(flannelConfig map[string]string) error {
}
opts, ok := dockerOpts[dockerOptsKey]
if !ok {
-glog.Errorf("(kubelet)Did not find docker opts, writing them")
+glog.Errorf("Did not find docker opts, writing them")
opts = fmt.Sprintf(
" --bridge=cbr0 --iptables=false --ip-masq=false")
} else {
@@ -139,7 +142,7 @@ func parseKVConfig(filename string) (map[string]string, error) {
return config, err
}
str := string(buff)
-glog.Infof("(kubelet) Read kv options %+v from %v", str, filename)
+glog.Infof("Read kv options %+v from %v", str, filename)
for _, line := range strings.Split(str, "\n") {
kv := strings.Split(line, "=")
if len(kv) != 2 {
@@ -160,6 +163,6 @@ func writeKVConfig(filename string, kv map[string]string) error {
for k, v := range kv {
content += fmt.Sprintf("%v=%v\n", k, v)
}
-glog.Warningf("(kubelet)Writing kv options %+v to %v", content, filename)
+glog.Warningf("Writing kv options %+v to %v", content, filename)
return ioutil.WriteFile(filename, []byte(content), 0644)
}
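For context, the subnet file that parseKVConfig reads is flanneld's KEY=VALUE environment file. A representative example with illustrative values, assuming the keys flanneld typically writes alongside the two the helper looks for:

```sh
FLANNEL_NETWORK=172.16.0.0/12
FLANNEL_SUBNET=172.16.42.1/24
FLANNEL_MTU=1450
FLANNEL_IPMASQ=false
```

Handshake picks FLANNEL_SUBNET out of this map as the node's podCIDR, and writeDockerOptsFromFlannelConfig folds the values into the docker bridge options.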
44 changes: 33 additions & 11 deletions pkg/kubelet/kubelet.go
@@ -217,7 +217,7 @@ func NewMainKubelet(
oomAdjuster *oom.OOMAdjuster,
serializeImagePulls bool,
containerManager cm.ContainerManager,
-useDefaultOverlay bool,
+flannelExperimentalOverlay bool,
) (*Kubelet, error) {

if rootDirectory == "" {
@@ -328,12 +328,19 @@
cpuCFSQuota: cpuCFSQuota,
daemonEndpoints: daemonEndpoints,
containerManager: containerManager,
+flannelExperimentalOverlay: flannelExperimentalOverlay,
flannelHelper: NewFlannelHelper(),
-useDefaultOverlay: useDefaultOverlay,
}
+if klet.flannelExperimentalOverlay {
+glog.Infof("Flannel is in charge of podCIDR and overlay networking.")
+}
if klet.kubeClient == nil {
-glog.Infof("Master not setting up flannel overlay")
-klet.useDefaultOverlay = false
+// The master kubelet cannot wait for the flannel daemon because it is responsible
+// for starting up the flannel server in a static pod. So even though the flannel
+// daemon runs on the master, it doesn't hold up cluster bootstrap. All the pods
+// on the master run with host networking, so the master flannel doesn't care
+// even if the network changes. We only need it for the master proxy.
+klet.flannelExperimentalOverlay = false
}
if plug, err := network.InitNetworkPlugin(networkPlugins, networkPluginName, &networkHost{klet}); err != nil {
return nil, err
@@ -656,8 +663,12 @@ type Kubelet struct {
// oneTimeInitializer is used to initialize modules that are dependent on the runtime to be up.
oneTimeInitializer sync.Once

-useDefaultOverlay bool
-flannelHelper *FlannelHelper
+flannelExperimentalOverlay bool
+
+// TODO: Flannelhelper doesn't store any state, we can instantiate it
+// on the fly if we're confident the dbus connetions it opens doesn't
+// put the system under duress.
+flannelHelper *FlannelHelper
}

func (kl *Kubelet) allSourcesReady() bool {
@@ -2628,7 +2639,7 @@ var oldNodeUnschedulable bool
func (kl *Kubelet) syncNetworkStatus() {
var err error
if kl.configureCBR0 {
-if kl.useDefaultOverlay {
+if kl.flannelExperimentalOverlay {
podCIDR, err := kl.flannelHelper.Handshake()
if err != nil {
glog.Infof("Flannel server handshake failed %v", err)
Expand Down Expand Up @@ -2903,13 +2914,24 @@ func (kl *Kubelet) tryUpdateNodeStatus() error {
if node == nil {
return fmt.Errorf("no node instance returned for %q", kl.nodeName)
}
-// TODO: Actually update the node spec with pod cidr, this is currently a no-op.
-if kl.useDefaultOverlay {
-node.Spec.PodCIDR = kl.runtimeState.podCIDR()
+// Flannel is the authoritative source of pod CIDR, if it's running.
+// This is a short term compromise till we get flannel working in
+// reservation mode.
+if kl.flannelExperimentalOverlay {
+flannelPodCIDR := kl.runtimeState.podCIDR()
+if node.Spec.PodCIDR != flannelPodCIDR {
+node.Spec.PodCIDR = flannelPodCIDR
+glog.Infof("Updating podcidr to %v", node.Spec.PodCIDR)
+if updatedNode, err := kl.kubeClient.Nodes().Update(node); err != nil {
+glog.Warningf("Failed to update podCIDR: %v", err)
+} else {
+// Update the node resourceVersion so the status update doesn't fail.
+node = updatedNode
+}
+}
} else if kl.reconcileCIDR {
kl.runtimeState.setPodCIDR(node.Spec.PodCIDR)
}
-glog.Infof("Updating node in apiserver with cidr %v", node.Spec.PodCIDR)

if err := kl.setNodeStatus(node); err != nil {
return err
