should use time.Since instead of time.Now().Sub
wgliang committed Apr 10, 2018
1 parent a5c3c8d commit 8966928
Showing 25 changed files with 39 additions and 38 deletions.
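For context: `time.Since(t)` is a convenience function in Go's standard `time` package that returns the elapsed time since `t`, equivalent to `time.Now().Sub(t)`, so this commit is a readability cleanup with no behavioral effect. A minimal, stand-alone sketch (not part of this commit) illustrating the equivalence:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Now()
	time.Sleep(50 * time.Millisecond)

	// Both expressions measure the elapsed time since `start`.
	// time.Since(t) is shorthand for time.Now().Sub(t), so replacing the
	// long form with the short one does not change behavior; it only
	// makes the call site easier to read.
	longForm := time.Now().Sub(start)
	shortForm := time.Since(start)

	fmt.Println(longForm, shortForm) // both print roughly 50ms
}
```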
4 changes: 2 additions & 2 deletions pkg/cloudprovider/providers/gce/gce_op.go
@@ -45,7 +45,7 @@ func (gce *GCECloud) waitForOp(op *computev1.Operation, getOperation func(operat
return wait.Poll(operationPollInterval, operationPollTimeoutDuration, func() (bool, error) {
start := time.Now()
gce.operationPollRateLimiter.Accept()
- duration := time.Now().Sub(start)
+ duration := time.Since(start)
if duration > 5*time.Second {
glog.V(2).Infof("pollOperation: throttled %v for %v", duration, opName)
}
@@ -57,7 +57,7 @@ func (gce *GCECloud) waitForOp(op *computev1.Operation, getOperation func(operat

done := opIsDone(pollOp)
if done {
- duration := time.Now().Sub(opStart)
+ duration := time.Since(opStart)
if duration > 1*time.Minute {
// Log the JSON. It's cleaner than the %v structure.
enc, err := pollOp.MarshalJSON()
2 changes: 1 addition & 1 deletion pkg/controller/bootstrap/tokencleaner.go
@@ -161,7 +161,7 @@ func (tc *TokenCleaner) processNextWorkItem() bool {
func (tc *TokenCleaner) syncFunc(key string) error {
startTime := time.Now()
defer func() {
- glog.V(4).Infof("Finished syncing secret %q (%v)", key, time.Now().Sub(startTime))
+ glog.V(4).Infof("Finished syncing secret %q (%v)", key, time.Since(startTime))
}()

namespace, name, err := cache.SplitMetaNamespaceKey(key)
2 changes: 1 addition & 1 deletion pkg/controller/certificates/certificate_controller.go
@@ -169,7 +169,7 @@ func (cc *CertificateController) enqueueCertificateRequest(obj interface{}) {
func (cc *CertificateController) syncFunc(key string) error {
startTime := time.Now()
defer func() {
- glog.V(4).Infof("Finished syncing certificate request %q (%v)", key, time.Now().Sub(startTime))
+ glog.V(4).Infof("Finished syncing certificate request %q (%v)", key, time.Since(startTime))
}()
csr, err := cc.csrLister.Get(key)
if errors.IsNotFound(err) {
2 changes: 1 addition & 1 deletion pkg/controller/daemon/daemon_controller.go
@@ -1110,7 +1110,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *apps.DaemonSet, hash
func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
startTime := time.Now()
defer func() {
- glog.V(4).Infof("Finished syncing daemon set %q (%v)", key, time.Now().Sub(startTime))
+ glog.V(4).Infof("Finished syncing daemon set %q (%v)", key, time.Since(startTime))
}()

namespace, name, err := cache.SplitMetaNamespaceKey(key)
2 changes: 1 addition & 1 deletion pkg/controller/disruption/disruption.go
@@ -471,7 +471,7 @@ func (dc *DisruptionController) processNextRecheckWorkItem() bool {
func (dc *DisruptionController) sync(key string) error {
startTime := time.Now()
defer func() {
- glog.V(4).Infof("Finished syncing PodDisruptionBudget %q (%v)", key, time.Now().Sub(startTime))
+ glog.V(4).Infof("Finished syncing PodDisruptionBudget %q (%v)", key, time.Since(startTime))
}()

namespace, name, err := cache.SplitMetaNamespaceKey(key)
2 changes: 1 addition & 1 deletion pkg/controller/endpoint/endpoints_controller.go
@@ -385,7 +385,7 @@ func (e *EndpointController) handleErr(err error, key interface{}) {
func (e *EndpointController) syncService(key string) error {
startTime := time.Now()
defer func() {
- glog.V(4).Infof("Finished syncing service %q endpoints. (%v)", key, time.Now().Sub(startTime))
+ glog.V(4).Infof("Finished syncing service %q endpoints. (%v)", key, time.Since(startTime))
}()

namespace, name, err := cache.SplitMetaNamespaceKey(key)
2 changes: 1 addition & 1 deletion pkg/controller/job/job_controller.go
@@ -434,7 +434,7 @@ func (jm *JobController) getPodsForJob(j *batch.Job) ([]*v1.Pod, error) {
func (jm *JobController) syncJob(key string) (bool, error) {
startTime := time.Now()
defer func() {
- glog.V(4).Infof("Finished syncing job %q (%v)", key, time.Now().Sub(startTime))
+ glog.V(4).Infof("Finished syncing job %q (%v)", key, time.Since(startTime))
}()

ns, name, err := cache.SplitMetaNamespaceKey(key)
2 changes: 1 addition & 1 deletion pkg/controller/namespace/namespace_controller.go
@@ -163,7 +163,7 @@ func (nm *NamespaceController) worker() {
func (nm *NamespaceController) syncNamespaceFromKey(key string) (err error) {
startTime := time.Now()
defer func() {
- glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Now().Sub(startTime))
+ glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Since(startTime))
}()

namespace, err := nm.lister.Get(key)
2 changes: 1 addition & 1 deletion pkg/controller/resourcequota/resource_quota_controller.go
@@ -297,7 +297,7 @@ func (rq *ResourceQuotaController) Run(workers int, stopCh <-chan struct{}) {
func (rq *ResourceQuotaController) syncResourceQuotaFromKey(key string) (err error) {
startTime := time.Now()
defer func() {
- glog.V(4).Infof("Finished syncing resource quota %q (%v)", key, time.Now().Sub(startTime))
+ glog.V(4).Infof("Finished syncing resource quota %q (%v)", key, time.Since(startTime))
}()

namespace, name, err := cache.SplitMetaNamespaceKey(key)
8 changes: 4 additions & 4 deletions pkg/controller/route/route_controller.go
@@ -173,13 +173,13 @@ func (rc *RouteController) reconcile(nodes []*v1.Node, routes []*cloudprovider.R
// Ensure that we don't have more than maxConcurrentRouteCreations
// CreateRoute calls in flight.
rateLimiter <- struct{}{}
- glog.Infof("Creating route for node %s %s with hint %s, throttled %v", nodeName, route.DestinationCIDR, nameHint, time.Now().Sub(startTime))
+ glog.Infof("Creating route for node %s %s with hint %s, throttled %v", nodeName, route.DestinationCIDR, nameHint, time.Since(startTime))
err := rc.routes.CreateRoute(context.TODO(), rc.clusterName, nameHint, route)
<-rateLimiter

rc.updateNetworkingCondition(nodeName, err == nil)
if err != nil {
- msg := fmt.Sprintf("Could not create route %s %s for node %s after %v: %v", nameHint, route.DestinationCIDR, nodeName, time.Now().Sub(startTime), err)
+ msg := fmt.Sprintf("Could not create route %s %s for node %s after %v: %v", nameHint, route.DestinationCIDR, nodeName, time.Since(startTime), err)
if rc.recorder != nil {
rc.recorder.Eventf(
&v1.ObjectReference{
@@ -218,9 +218,9 @@ func (rc *RouteController) reconcile(nodes []*v1.Node, routes []*cloudprovider.R
defer wg.Done()
glog.Infof("Deleting route %s %s", route.Name, route.DestinationCIDR)
if err := rc.routes.DeleteRoute(context.TODO(), rc.clusterName, route); err != nil {
- glog.Errorf("Could not delete route %s %s after %v: %v", route.Name, route.DestinationCIDR, time.Now().Sub(startTime), err)
+ glog.Errorf("Could not delete route %s %s after %v: %v", route.Name, route.DestinationCIDR, time.Since(startTime), err)
} else {
- glog.Infof("Deleted route %s %s after %v", route.Name, route.DestinationCIDR, time.Now().Sub(startTime))
+ glog.Infof("Deleted route %s %s after %v", route.Name, route.DestinationCIDR, time.Since(startTime))
}
}(route, time.Now())
}
@@ -183,7 +183,7 @@ func (c *ServiceAccountsController) processNextWorkItem() bool {
func (c *ServiceAccountsController) syncNamespace(key string) error {
startTime := time.Now()
defer func() {
- glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Now().Sub(startTime))
+ glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Since(startTime))
}()

ns, err := c.nsLister.Get(key)
2 changes: 1 addition & 1 deletion pkg/controller/statefulset/stateful_set.go
@@ -415,7 +415,7 @@ func (ssc *StatefulSetController) worker() {
func (ssc *StatefulSetController) sync(key string) error {
startTime := time.Now()
defer func() {
- glog.V(4).Infof("Finished syncing statefulset %q (%v)", key, time.Now().Sub(startTime))
+ glog.V(4).Infof("Finished syncing statefulset %q (%v)", key, time.Since(startTime))
}()

namespace, name, err := cache.SplitMetaNamespaceKey(key)
@@ -141,7 +141,7 @@ func (c *Controller) processPVC(pvcNamespace, pvcName string) error {
glog.V(4).Infof("Processing PVC %s/%s", pvcNamespace, pvcName)
startTime := time.Now()
defer func() {
- glog.V(4).Infof("Finished processing PVC %s/%s (%v)", pvcNamespace, pvcName, time.Now().Sub(startTime))
+ glog.V(4).Infof("Finished processing PVC %s/%s (%v)", pvcNamespace, pvcName, time.Since(startTime))
}()

pvc, err := c.pvcLister.PersistentVolumeClaims(pvcNamespace).Get(pvcName)
@@ -119,7 +119,7 @@ func (c *Controller) processPV(pvName string) error {
glog.V(4).Infof("Processing PV %s", pvName)
startTime := time.Now()
defer func() {
- glog.V(4).Infof("Finished processing PV %s (%v)", pvName, time.Now().Sub(startTime))
+ glog.V(4).Infof("Finished processing PV %s (%v)", pvName, time.Since(startTime))
}()

pv, err := c.pvLister.Get(pvName)
2 changes: 1 addition & 1 deletion pkg/kubelet/dockershim/libdocker/kube_docker_client.go
@@ -337,7 +337,7 @@ func (p *progressReporter) start() {
case <-ticker.C:
progress, timestamp := p.progress.get()
// If there is no progress for p.imagePullProgressDeadline, cancel the operation.
- if time.Now().Sub(timestamp) > p.imagePullProgressDeadline {
+ if time.Since(timestamp) > p.imagePullProgressDeadline {
glog.Errorf("Cancel pulling image %q because of no progress for %v, latest progress: %q", p.image, p.imagePullProgressDeadline, progress)
p.cancel()
return
3 changes: 2 additions & 1 deletion pkg/printers/internalversion/printers.go
@@ -503,7 +503,7 @@ func translateTimestamp(timestamp metav1.Time) string {
if timestamp.IsZero() {
return "<unknown>"
}
- return duration.ShortHumanDuration(time.Now().Sub(timestamp.Time))
+
+ return duration.ShortHumanDuration(time.Since(timestamp.Time))
}

var (
4 changes: 2 additions & 2 deletions pkg/proxy/userspace/roundrobin.go
@@ -159,7 +159,7 @@ func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr ne
}
if !sessionAffinityReset {
sessionAffinity, exists := state.affinity.affinityMap[ipaddr]
- if exists && int(time.Now().Sub(sessionAffinity.lastUsed).Seconds()) < state.affinity.ttlSeconds {
+ if exists && int(time.Since(sessionAffinity.lastUsed).Seconds()) < state.affinity.ttlSeconds {
// Affinity wins.
endpoint := sessionAffinity.endpoint
sessionAffinity.lastUsed = time.Now()
@@ -378,7 +378,7 @@ func (lb *LoadBalancerRR) CleanupStaleStickySessions(svcPort proxy.ServicePortNa
return
}
for ip, affinity := range state.affinity.affinityMap {
- if int(time.Now().Sub(affinity.lastUsed).Seconds()) >= state.affinity.ttlSeconds {
+ if int(time.Since(affinity.lastUsed).Seconds()) >= state.affinity.ttlSeconds {
glog.V(4).Infof("Removing client %s from affinityMap for service %q", affinity.clientIP, svcPort)
delete(state.affinity.affinityMap, ip)
}
4 changes: 2 additions & 2 deletions pkg/proxy/winuserspace/roundrobin.go
@@ -149,7 +149,7 @@ func (lb *LoadBalancerRR) NextEndpoint(svcPort proxy.ServicePortName, srcAddr ne
}
if !sessionAffinityReset {
sessionAffinity, exists := state.affinity.affinityMap[ipaddr]
- if exists && int(time.Now().Sub(sessionAffinity.lastUsed).Seconds()) < state.affinity.ttlSeconds {
+ if exists && int(time.Since(sessionAffinity.lastUsed).Seconds()) < state.affinity.ttlSeconds {
// Affinity wins.
endpoint := sessionAffinity.endpoint
sessionAffinity.lastUsed = time.Now()
@@ -366,7 +366,7 @@ func (lb *LoadBalancerRR) CleanupStaleStickySessions(svcPort proxy.ServicePortNa
return
}
for ip, affinity := range state.affinity.affinityMap {
- if int(time.Now().Sub(affinity.lastUsed).Seconds()) >= state.affinity.ttlSeconds {
+ if int(time.Since(affinity.lastUsed).Seconds()) >= state.affinity.ttlSeconds {
glog.V(4).Infof("Removing client %s from affinityMap for service %q", affinity.clientIP, svcPort)
delete(state.affinity.affinityMap, ip)
}
2 changes: 1 addition & 1 deletion pkg/ssh/ssh.go
@@ -399,7 +399,7 @@ func (l *SSHTunnelList) Dial(net, addr string) (net.Conn, error) {
id := mathrand.Int63() // So you can match begins/ends in the log.
glog.Infof("[%x: %v] Dialing...", id, addr)
defer func() {
- glog.Infof("[%x: %v] Dialed in %v.", id, addr, time.Now().Sub(start))
+ glog.Infof("[%x: %v] Dialed in %v.", id, addr, time.Since(start))
}()
tunnel, err := l.pickTunnel(strings.Split(addr, ":")[0])
if err != nil {
4 changes: 2 additions & 2 deletions staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go
@@ -198,7 +198,7 @@ func InstrumentRouteFunc(verb, resource, subresource, scope string, routeFunc re

routeFunc(request, response)

- MonitorRequest(request.Request, verb, resource, subresource, scope, delegate.Header().Get("Content-Type"), delegate.Status(), delegate.ContentLength(), time.Now().Sub(now))
+ MonitorRequest(request.Request, verb, resource, subresource, scope, delegate.Header().Get("Content-Type"), delegate.Status(), delegate.ContentLength(), time.Since(now))
})
}

@@ -220,7 +220,7 @@ func InstrumentHandlerFunc(verb, resource, subresource, scope string, handler ht

handler(w, req)

- MonitorRequest(req, verb, resource, subresource, scope, delegate.Header().Get("Content-Type"), delegate.Status(), delegate.ContentLength(), time.Now().Sub(now))
+ MonitorRequest(req, verb, resource, subresource, scope, delegate.Header().Get("Content-Type"), delegate.Status(), delegate.ContentLength(), time.Since(now))
}
}

@@ -85,7 +85,7 @@ func (c *cachedGetter) Token() (string, error) {

var err error
// no token or exceeds the TTL
- if c.token == "" || time.Now().Sub(c.born) > c.ttl {
+ if c.token == "" || time.Since(c.born) > c.ttl {
c.token, err = c.tokenGetter.Token()
if err != nil {
return "", fmt.Errorf("failed to get token: %s", err)
12 changes: 6 additions & 6 deletions test/e2e/framework/util.go
@@ -1116,7 +1116,7 @@ func deleteNS(c clientset.Interface, clientPool dynamic.ClientPool, namespace st
// no remaining content, but namespace was not deleted (namespace controller is probably wedged)
return fmt.Errorf("namespace %v was not deleted with limit: %v, namespace is empty but is not yet removed", namespace, err)
}
- Logf("namespace %v deletion completed in %s", namespace, time.Now().Sub(startTime))
+ Logf("namespace %v deletion completed in %s", namespace, time.Since(startTime))
return nil
}

@@ -3028,21 +3028,21 @@ func DeleteResourceAndPods(clientset clientset.Interface, internalClientset inte
if err := testutils.DeleteResourceUsingReaperWithRetries(internalClientset, kind, ns, name, nil, scaleClient); err != nil {
return fmt.Errorf("error while stopping %v: %s: %v", kind, name, err)
}
- deleteTime := time.Now().Sub(startTime)
+ deleteTime := time.Since(startTime)
Logf("Deleting %v %s took: %v", kind, name, deleteTime)
err = waitForPodsInactive(ps, 100*time.Millisecond, 10*time.Minute)
if err != nil {
return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err)
}
- terminatePodTime := time.Now().Sub(startTime) - deleteTime
+ terminatePodTime := time.Since(startTime) - deleteTime
Logf("Terminating %v %s pods took: %v", kind, name, terminatePodTime)
// this is to relieve namespace controller's pressure when deleting the
// namespace after a test.
err = waitForPodsGone(ps, 100*time.Millisecond, 10*time.Minute)
if err != nil {
return fmt.Errorf("error while waiting for pods gone %s: %v", name, err)
}
- gcPodTime := time.Now().Sub(startTime) - terminatePodTime
+ gcPodTime := time.Since(startTime) - terminatePodTime
Logf("Garbage collecting %v %s pods took: %v", kind, name, gcPodTime)
return nil
}
@@ -3080,7 +3080,7 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns
if err := testutils.DeleteResourceWithRetries(c, kind, ns, name, deleteOption); err != nil {
return err
}
- deleteTime := time.Now().Sub(startTime)
+ deleteTime := time.Since(startTime)
Logf("Deleting %v %s took: %v", kind, name, deleteTime)

var interval, timeout time.Duration
@@ -3104,7 +3104,7 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns
if err != nil {
return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err)
}
- terminatePodTime := time.Now().Sub(startTime) - deleteTime
+ terminatePodTime := time.Since(startTime) - deleteTime
Logf("Terminating %v %s pods took: %v", kind, name, terminatePodTime)

err = waitForPodsGone(ps, interval, 10*time.Minute)
2 changes: 1 addition & 1 deletion test/e2e/scalability/density.go
@@ -274,7 +274,7 @@ func runDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPhaseTi
logStopCh := make(chan struct{})
go logPodStartupStatus(dtc.ClientSets[0], dtc.PodCount, map[string]string{"type": "densityPod"}, dtc.PollInterval, logStopCh)
wg.Wait()
- startupTime := time.Now().Sub(startTime)
+ startupTime := time.Since(startTime)
close(logStopCh)
framework.Logf("E2E startup time for %d pods: %v", dtc.PodCount, startupTime)
framework.Logf("Throughput (pods/s) during cluster saturation phase: %v", float32(dtc.PodCount)/float32(startupTime/time.Second))
4 changes: 2 additions & 2 deletions test/images/liveness/server.go
@@ -29,11 +29,11 @@ func main() {
started := time.Now()
http.HandleFunc("/started", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(200)
- data := (time.Now().Sub(started)).String()
+ data := (time.Since(started)).String()
w.Write([]byte(data))
})
http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
- duration := time.Now().Sub(started)
+ duration := time.Since(started)
if duration.Seconds() > 10 {
w.WriteHeader(500)
w.Write([]byte(fmt.Sprintf("error: %v", duration.Seconds())))
2 changes: 1 addition & 1 deletion test/images/resource-consumer/consume-cpu/consume_cpu.go
@@ -45,7 +45,7 @@ func main() {
duration := time.Duration(*durationSec) * time.Second
start := time.Now()
first := systemstat.GetProcCPUSample()
- for time.Now().Sub(start) < duration {
+ for time.Since(start) < duration {
cpu := systemstat.GetProcCPUAverage(first, systemstat.GetProcCPUSample(), systemstat.GetUptime().Uptime)
if cpu.TotalPct < millicoresPct {
doSomething()
