Sync node status from node controller to master.
ddysher committed Jan 30, 2015
1 parent e619f30 commit c793c4f
Showing 16 changed files with 583 additions and 504 deletions.
4 changes: 2 additions & 2 deletions cmd/integration/integration.go
@@ -184,8 +184,8 @@ func startComponents(manifestURL string) (apiServerURL string) {
 	controllerManager.Run(10 * time.Minute)
 
 	nodeResources := &api.NodeResources{}
-	nodeController := nodeControllerPkg.NewNodeController(nil, "", machineList, nodeResources, cl)
-	nodeController.Run(10 * time.Second)
+	nodeController := nodeControllerPkg.NewNodeController(nil, "", machineList, nodeResources, cl, fakeKubeletClient{})
+	nodeController.Run(10*time.Second, 10)
 
 	// Kubelet (localhost)
 	testRootDir := makeTempDirOrDie("kubelet_integ_1.")
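The integration test now supplies fakeKubeletClient{} as the node controller's new kubelet-client argument; its definition sits outside this hunk. A minimal sketch of what such a stub could look like, assuming a HealthCheck method on the client interface, a health package with a Healthy status, and the era's GoogleCloudPlatform import path (all assumptions, not shown in this diff):

package main

import (
	"fmt"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/health"
)

// fakeKubeletClient is a hypothetical reconstruction of the test stub; the
// HealthCheck signature and the health package are assumptions, not code
// shown in this commit.
type fakeKubeletClient struct{}

// HealthCheck always reports the node as healthy, so node registration and
// status sync can proceed without a live kubelet.
func (fakeKubeletClient) HealthCheck(host string) (health.Status, error) {
	return health.Healthy, nil
}

func main() {
	status, _ := fakeKubeletClient{}.HealthCheck("127.0.0.1")
	fmt.Println(status)
}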
1 change: 0 additions & 1 deletion cmd/kube-apiserver/apiserver.go
@@ -184,7 +184,6 @@ func main() {
 		Client:             client,
 		Cloud:              cloud,
 		EtcdHelper:         helper,
-		HealthCheckMinions: *healthCheckMinions,
 		EventTTL:           *eventTTL,
 		KubeletClient:      kubeletClient,
 		PortalNet:          &n,
18 changes: 13 additions & 5 deletions cmd/kube-controller-manager/controller-manager.go
@@ -53,18 +53,22 @@ var (
 	nodeSyncPeriod = flag.Duration("node_sync_period", 10*time.Second, ""+
 		"The period for syncing nodes from cloudprovider. Longer periods will result in "+
 		"fewer calls to cloud provider, but may delay addition of new nodes to cluster.")
-	resourceQuotaSyncPeriod = flag.Duration("resource_quota_sync_period", 10*time.Second, "The period for syncing quota usage status in the system")
+	registerRetryCount = flag.Int("register_retry_count", 10, ""+
+		"The number of retries for initial node registration. Retry interval equals node_sync_period.")
 	machineList util.StringList
 	// TODO: Discover these by pinging the host machines, and rip out these flags.
 	// TODO: in the meantime, use resource.QuantityFlag() instead of these
-	nodeMilliCPU = flag.Int64("node_milli_cpu", 1000, "The amount of MilliCPU provisioned on each node")
-	nodeMemory   = resource.QuantityFlag("node_memory", "3Gi", "The amount of memory (in bytes) provisioned on each node")
+	resourceQuotaSyncPeriod = flag.Duration("resource_quota_sync_period", 10*time.Second, "The period for syncing quota usage status in the system")
+	nodeMilliCPU            = flag.Int64("node_milli_cpu", 1000, "The amount of MilliCPU provisioned on each node")
+	nodeMemory              = resource.QuantityFlag("node_memory", "3Gi", "The amount of memory (in bytes) provisioned on each node")
+	kubeletConfig           = client.KubeletConfig{Port: ports.KubeletPort, EnableHttps: false}
 )
 
 func init() {
 	flag.Var(&address, "address", "The IP address to serve on (set to 0.0.0.0 for all interfaces)")
 	flag.Var(&machineList, "machines", "List of machines to schedule onto, comma separated.")
 	client.BindClientConfigFlags(flag.CommandLine, clientConfig)
+	client.BindKubeletClientConfigFlags(flag.CommandLine, &kubeletConfig)
 }
 
 func verifyMinionFlags() {
@@ -104,15 +108,19 @@ func main() {
 	controllerManager := replicationControllerPkg.NewReplicationManager(kubeClient)
 	controllerManager.Run(10 * time.Second)
 
+	kubeletClient, err := client.NewKubeletClient(&kubeletConfig)
+	if err != nil {
+		glog.Fatalf("Failure to start kubelet client: %v", err)
+	}
 	cloud := cloudprovider.InitCloudProvider(*cloudProvider, *cloudConfigFile)
 	nodeResources := &api.NodeResources{
 		Capacity: api.ResourceList{
 			api.ResourceCPU:    *resource.NewMilliQuantity(*nodeMilliCPU, resource.DecimalSI),
 			api.ResourceMemory: *nodeMemory,
 		},
 	}
-	nodeController := nodeControllerPkg.NewNodeController(cloud, *minionRegexp, machineList, nodeResources, kubeClient)
-	nodeController.Run(*nodeSyncPeriod)
+	nodeController := nodeControllerPkg.NewNodeController(cloud, *minionRegexp, machineList, nodeResources, kubeClient, kubeletClient)
+	nodeController.Run(*nodeSyncPeriod, *registerRetryCount)
 
 	resourceQuotaManager := resourcequota.NewResourceQuotaManager(kubeClient)
 	resourceQuotaManager.Run(*resourceQuotaSyncPeriod)
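On the wiring side, the controller-manager now builds a kubelet client up front and hands it to the node controller, whose Run gained a registration retry count. Since the retry interval equals node_sync_period, the defaults give roughly 10 × 10s of registration attempts before the controller gives up. Not part of the commit: a self-contained sketch of the control flow the new Run(period, retryCount) signature suggests; all names below are placeholders, not the node controller's real methods.

package main

import (
	"fmt"
	"time"
)

// nodeController is a stand-in for the real NodeController type.
type nodeController struct {
	machines []string
}

// registerNodes stands in for initial registration: try once per period,
// up to retryCount times, before giving up.
func (nc *nodeController) registerNodes(retryCount int, period time.Duration) error {
	for i := 0; i < retryCount; i++ {
		// A real implementation would create each node object in the
		// apiserver here and return nil once all succeed.
		time.Sleep(period)
	}
	return fmt.Errorf("unable to register all nodes after %d retries", retryCount)
}

// syncNodeStatus stands in for the new status sync: probe each node's
// kubelet (via the kubelet client passed to the constructor) and write
// the observed status back to the master.
func (nc *nodeController) syncNodeStatus() error {
	return nil
}

// Run mirrors the two-argument signature introduced by this commit:
// register nodes first, then sync status to the master on every tick.
func (nc *nodeController) Run(period time.Duration, retryCount int) {
	if err := nc.registerNodes(retryCount, period); err != nil {
		fmt.Printf("error registering nodes: %v\n", err)
	}
	for range time.Tick(period) {
		if err := nc.syncNodeStatus(); err != nil {
			fmt.Printf("error syncing node status: %v\n", err)
		}
	}
}

func main() {
	(&nodeController{machines: []string{"node-1"}}).Run(10*time.Second, 10)
}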
8 changes: 5 additions & 3 deletions pkg/api/validation/validation.go
@@ -673,9 +673,11 @@ func ValidateMinionUpdate(oldMinion *api.Node, minion *api.Node) errs.ValidationErrorList {
 	allErrs := errs.ValidationErrorList{}
 	allErrs = append(allErrs, ValidateObjectMetaUpdate(&oldMinion.ObjectMeta, &minion.ObjectMeta).Prefix("metadata")...)
 
-	if !api.Semantic.DeepEqual(minion.Status, api.NodeStatus{}) {
-		allErrs = append(allErrs, errs.NewFieldInvalid("status", minion.Status, "status must be empty"))
-	}
+	// TODO: Enable the code once we have better api object.status update model. Currently,
+	// anyone can update node status.
+	// if !api.Semantic.DeepEqual(minion.Status, api.NodeStatus{}) {
+	// 	allErrs = append(allErrs, errs.NewFieldInvalid("status", minion.Status, "status must be empty"))
+	// }
 
 	// TODO: move reset function to its own location
 	// Ignore metadata changes now that they have been tested
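The effect of disabling this check: a node update carrying a non-empty Status, which is exactly what the node controller's sync now writes, passes validation, and the test case deleted in the next file exercised the old rejection. Not part of the commit: a minimal illustration of the behavior change; the import paths follow the era's tree layout and are assumptions.

package main

import (
	"fmt"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/validation"
)

func main() {
	oldNode := api.Node{ObjectMeta: api.ObjectMeta{Name: "foo"}}
	newNode := api.Node{
		ObjectMeta: api.ObjectMeta{Name: "foo"},
		Status:     api.NodeStatus{HostIP: "1.2.3.4"},
	}
	// Before this commit: one error ("status must be empty").
	// After this commit: no errors, so the node controller can push
	// status through the regular update path.
	errs := validation.ValidateMinionUpdate(&oldNode, &newNode)
	fmt.Printf("validation errors: %d\n", len(errs))
}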
14 changes: 0 additions & 14 deletions pkg/api/validation/validation_test.go
@@ -1476,20 +1476,6 @@ func TestValidateMinionUpdate(t *testing.T) {
 			},
 		},
 	}, true},
-	{api.Node{
-		ObjectMeta: api.ObjectMeta{
-			Name:   "foo",
-			Labels: map[string]string{"bar": "foo"},
-		},
-	}, api.Node{
-		ObjectMeta: api.ObjectMeta{
-			Name:   "foo",
-			Labels: map[string]string{"bar": "fooobaz"},
-		},
-		Status: api.NodeStatus{
-			HostIP: "1.2.3.4",
-		},
-	}, false},
 	{api.Node{
 		ObjectMeta: api.ObjectMeta{
 			Name: "foo",