forked from kubernetes/kubernetes
Commit
Merge pull request kubernetes#5750 from satnam6502/soak
Soak test based on serve_hostname image
Showing 2 changed files with 212 additions and 0 deletions.
Makefile
@@ -0,0 +1,2 @@
all:
	go build serve_hostnames.go
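Assuming the build above succeeds and a valid ~/.kube/.kubeconfig is present, a typical run (flag names taken from serve_hostnames.go below; the values are only illustrative) might look like:

	make
	./serve_hostnames -queries=100 -per_node=2 -up_to=10

With -up_to=-1 (the default) the test loops until interrupted.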
serve_hostnames.go
@@ -0,0 +1,210 @@
/*
Copyright 2015 Google Inc. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
This soak test places a specified number of pods on each node and then
repeatedly sends queries to these pods via a service.
*/

package main

import (
	"flag"
	"fmt"
	"math/rand"
	"os"
	"path/filepath"
	"strconv"
	"time"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
	"github.com/golang/glog"
)

var (
	queries = flag.Int("queries", 1000, "Number of hostname queries to make in each iteration")
	perNode = flag.Int("per_node", 1, "Number of hostname pods per node")
	upTo    = flag.Int("up_to", -1, "Number of iterations or -1 for no limit")
)

const podStartTimeout = 5 * time.Minute

func main() {
	flag.Parse()

glog.Infof("Starting serve_hostnames soak test with queries=%d and perNode=%d upTo=%d", | ||
*queries, *perNode, *upTo) | ||
|
||
settings, err := clientcmd.LoadFromFile(filepath.Join(os.Getenv("HOME"), ".kube", ".kubeconfig")) | ||
if err != nil { | ||
glog.Fatalf("Error loading configuration: %v", err.Error()) | ||
} | ||
config, err := clientcmd.NewDefaultClientConfig(*settings, &clientcmd.ConfigOverrides{}).ClientConfig() | ||
if err != nil { | ||
glog.Fatalf("Failed to construct config: %v", err) | ||
} | ||
|
||
c, err := client.New(config) | ||
if err != nil { | ||
glog.Fatalf("Failed to make client: %v", err) | ||
} | ||
|
||
nodes, err := c.Nodes().List() | ||
if err != nil { | ||
glog.Fatalf("Failed to list nodes: %v", err) | ||
} | ||
|
||
if len(nodes.Items) == 0 { | ||
glog.Fatalf("Failed to find any nodes.") | ||
} | ||
|
||
glog.Infof("Nodes found on this cluster:") | ||
for i, node := range nodes.Items { | ||
glog.Infof("%d: %s", i, node.Name) | ||
} | ||
|
||
// Make a unique namespace for this test. | ||
r := rand.New(rand.NewSource(time.Now().UnixNano())) | ||
ns := "serve-hostnames-" + strconv.Itoa(r.Int()%10000) | ||
glog.Infof("Using namespace %s for this test.", ns) | ||
|
||
// Create a service for these pods. | ||
glog.Info("Creating service serve-hostnames") | ||
svc, err := c.Services(ns).Create(&api.Service{ | ||
ObjectMeta: api.ObjectMeta{ | ||
Name: "serve-hostnames", | ||
Labels: map[string]string{ | ||
"name": "serve-hostname", | ||
}, | ||
}, | ||
Spec: api.ServiceSpec{ | ||
Port: 9376, | ||
TargetPort: util.NewIntOrStringFromInt(9376), | ||
Selector: map[string]string{ | ||
"name": "serve-hostname", | ||
}, | ||
}, | ||
}) | ||
	if err != nil {
		// svc is nil when Create fails, so refer to the service by its literal name.
		glog.Warningf("Unable to create service %s/serve-hostnames: %v", ns, err)
		return
	}
	// Clean up service
	defer func() {
		glog.Info("Cleaning up service")
		if err := c.Services(ns).Delete(svc.Name); err != nil {
			glog.Warningf("unable to delete service %s/%s: %v", ns, svc.Name, err)
		}
	}()

	// Put serve-hostname pods on each node.
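	// Pinning each pod to a specific node via Spec.Host (rather than letting
	// the scheduler place them) guarantees every node serves queries.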
	podNames := []string{}
	for i, node := range nodes.Items {
		for j := 0; j < *perNode; j++ {
			podName := fmt.Sprintf("serve-hostname-%d-%d", i, j)
			podNames = append(podNames, podName)
			glog.Infof("Creating pod %s/%s on node %s", ns, podName, node.Name)
			_, err := c.Pods(ns).Create(&api.Pod{
				ObjectMeta: api.ObjectMeta{
					Name: podName,
					Labels: map[string]string{
						"name": "serve-hostname",
					},
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:  "serve-hostname",
							Image: "kubernetes/serve_hostname:1.1",
							Ports: []api.ContainerPort{{ContainerPort: 9376}},
						},
					},
					Host: node.Name,
				},
			})
			if err != nil {
				glog.Warningf("Failed to create pod %s: %v", podName, err)
				return
			}
		}
	}
	// Clean up the pods
	defer func() {
		glog.Info("Cleaning up pods")
		for _, podName := range podNames {
			if err := c.Pods(ns).Delete(podName); err != nil {
				glog.Warningf("Failed to delete pod %s: %v", podName, err)
			}
		}
	}()

glog.Info("Waiting for the serve-hostname pods to be ready") | ||
	for _, podName := range podNames {
		var pod *api.Pod
		for start := time.Now(); time.Since(start) < podStartTimeout; time.Sleep(5 * time.Second) {
			pod, err = c.Pods(ns).Get(podName)
			if err != nil {
				glog.Infof("Get pod %s/%s failed, ignoring for %v: %v", ns, podName, podStartTimeout, err)
				continue
			}
			if pod.Status.Phase == api.PodRunning {
				break
			}
		}
		// pod is nil if every Get above failed.
		if pod == nil || pod.Status.Phase != api.PodRunning {
			phase := "unknown"
			if pod != nil {
				phase = string(pod.Status.Phase)
			}
			glog.Warningf("Gave up waiting on pod %s/%s to be running (saw %s)", ns, podName, phase)
			return
		}
		glog.Infof("%s/%s is running", ns, podName)
	}

	// Repeatedly make requests.
	for iteration := 0; iteration != *upTo; iteration++ {
		responses := make(map[string]int, *perNode*len(nodes.Items))
		start := time.Now()
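		// Each request goes through the apiserver proxy to the service, which
		// load-balances across the serve-hostname pods; each successful
		// response body is the hostname of the pod that answered.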
for q := 0; q < *queries; q++ { | ||
hostname, err := c.Get(). | ||
Namespace(ns). | ||
Prefix("proxy"). | ||
Resource("services"). | ||
Name("serve-hostnames"). | ||
DoRaw() | ||
if err != nil { | ||
glog.Infof("Call failed during iteration %d query %d : %v", iteration, q, err) | ||
} else { | ||
responses[string(hostname)]++ | ||
} | ||
|
||
} | ||
for k, v := range responses { | ||
glog.Infof("%s: %d ", k, v) | ||
} | ||
// Report any nodes that did not respond. | ||
for n, node := range nodes.Items { | ||
for i := 0; i < *perNode; i++ { | ||
name := fmt.Sprintf("serve-hostname-%d-%d", n, i) | ||
if _, ok := responses[name]; !ok { | ||
glog.Warningf("No response from pod %s on node %s at iteration %d", name, node.Name, iteration) | ||
} | ||
} | ||
} | ||
glog.Infof("Iteration %d took %v for %d queries", iteration, time.Since(start), *queries) | ||
} | ||
} |
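For reference, the kubernetes/serve_hostname:1.1 image deployed above answers every HTTP request on port 9376 with the pod's hostname; because a pod's hostname matches its pod name, the per-pod response counting in the iteration loop works. A minimal sketch of that behavior (an illustration under that assumption, not the actual image source) could look like:

package main

import (
	"fmt"
	"net/http"
	"os"

	"github.com/golang/glog"
)

func main() {
	// A pod's hostname defaults to its pod name, so echoing os.Hostname()
	// lets the soak test attribute each response to a specific pod.
	hostname, err := os.Hostname()
	if err != nil {
		glog.Fatalf("Failed to read hostname: %v", err)
	}
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, hostname)
	})
	// Serve on the same port the pod spec above exposes.
	glog.Fatal(http.ListenAndServe(":9376", nil))
}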