Introduce GRPC_TRACE_FLAG_ENABLED macro to mark trace branches unlikely.
This is a trivial change that results in better code generation,
because it marks the trace path as unlikely.

The changes are all mechanical, except for some minor edits to
two macros in call_stack.h and http to apply best practices.
soheilhy committed May 8, 2019
1 parent a8644bb commit 70d5e5a
Showing 60 changed files with 451 additions and 426 deletions.
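
The hunks below only show call sites being converted from flag.enabled() to the new macro; the macro's own definition is not part of the rendered diff. As a rough sketch (an assumption for illustration, not the literal definition from this commit), GRPC_TRACE_FLAG_ENABLED can be thought of as the flag's existing enabled() check wrapped in a branch-prediction hint in the spirit of __builtin_expect:

// Sketch only: an assumed shape for the macro introduced by this commit,
// not its literal definition.
#if defined(__GNUC__) || defined(__clang__)
// Evaluate the trace flag, but tell the compiler the branch is rarely taken,
// so the logging path is kept out of the hot code layout.
#define GRPC_TRACE_FLAG_ENABLED(flag) (__builtin_expect(!!((flag).enabled()), 0))
#else
// Fallback for compilers without __builtin_expect: plain check, no hint.
#define GRPC_TRACE_FLAG_ENABLED(flag) ((flag).enabled())
#endif

// A converted call site then reads exactly like the ones in the diff:
//   if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
//     gpr_log(GPR_INFO, "[grpclb %p] ...", this);
//   }

Because tracers are off in normal operation, the hint matches the common case; that is where the "better code generation" in the commit message comes from, since compilers typically move the gpr_log path out of the straight-line code.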
@@ -125,7 +125,7 @@ static void partly_done(state_watcher* w, bool due_to_completion,
   gpr_mu_lock(&w->mu);
 
   if (due_to_completion) {
-    if (grpc_trace_operation_failures.enabled()) {
+    if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_operation_failures)) {
      GRPC_LOG_IF_ERROR("watch_completion_error", GRPC_ERROR_REF(error));
     }
     GRPC_ERROR_UNREF(error);
106 changes: 53 additions & 53 deletions src/core/ext/filters/client_channel/client_channel.cc

Large diffs are not rendered by default.

@@ -63,7 +63,7 @@ HealthCheckClient::HealthCheckClient(
               .set_jitter(HEALTH_CHECK_RECONNECT_JITTER)
               .set_max_backoff(HEALTH_CHECK_RECONNECT_MAX_BACKOFF_SECONDS *
                                1000)) {
-  if (grpc_health_check_client_trace.enabled()) {
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_health_check_client_trace)) {
     gpr_log(GPR_INFO, "created HealthCheckClient %p", this);
   }
   GRPC_CLOSURE_INIT(&retry_timer_callback_, OnRetryTimer, this,
@@ -72,7 +72,7 @@ HealthCheckClient::HealthCheckClient(
 }
 
 HealthCheckClient::~HealthCheckClient() {
-  if (grpc_health_check_client_trace.enabled()) {
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_health_check_client_trace)) {
     gpr_log(GPR_INFO, "destroying HealthCheckClient %p", this);
   }
   GRPC_ERROR_UNREF(error_);
@@ -99,7 +99,7 @@ void HealthCheckClient::SetHealthStatus(grpc_connectivity_state state,
 
 void HealthCheckClient::SetHealthStatusLocked(grpc_connectivity_state state,
                                               grpc_error* error) {
-  if (grpc_health_check_client_trace.enabled()) {
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_health_check_client_trace)) {
     gpr_log(GPR_INFO, "HealthCheckClient %p: setting state=%d error=%s", this,
             state, grpc_error_string(error));
   }
@@ -115,7 +115,7 @@ void HealthCheckClient::SetHealthStatusLocked(grpc_connectivity_state state,
 }
 
 void HealthCheckClient::Orphan() {
-  if (grpc_health_check_client_trace.enabled()) {
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_health_check_client_trace)) {
     gpr_log(GPR_INFO, "HealthCheckClient %p: shutting down", this);
   }
   {
@@ -145,7 +145,7 @@ void HealthCheckClient::StartCallLocked() {
   GPR_ASSERT(call_state_ == nullptr);
   SetHealthStatusLocked(GRPC_CHANNEL_CONNECTING, GRPC_ERROR_NONE);
   call_state_ = MakeOrphanable<CallState>(Ref(), interested_parties_);
-  if (grpc_health_check_client_trace.enabled()) {
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_health_check_client_trace)) {
     gpr_log(GPR_INFO, "HealthCheckClient %p: created CallState %p", this,
             call_state_.get());
   }
@@ -159,7 +159,7 @@ void HealthCheckClient::StartRetryTimer() {
                         GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                             "health check call failed; will retry after backoff"));
   grpc_millis next_try = retry_backoff_.NextAttemptTime();
-  if (grpc_health_check_client_trace.enabled()) {
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_health_check_client_trace)) {
     gpr_log(GPR_INFO, "HealthCheckClient %p: health check call lost...", this);
     grpc_millis timeout = next_try - ExecCtx::Get()->Now();
     if (timeout > 0) {
@@ -184,7 +184,7 @@ void HealthCheckClient::OnRetryTimer(void* arg, grpc_error* error) {
   self->retry_timer_callback_pending_ = false;
   if (!self->shutting_down_ && error == GRPC_ERROR_NONE &&
       self->call_state_ == nullptr) {
-    if (grpc_health_check_client_trace.enabled()) {
+    if (GRPC_TRACE_FLAG_ENABLED(grpc_health_check_client_trace)) {
       gpr_log(GPR_INFO, "HealthCheckClient %p: restarting health check call",
               self);
     }
@@ -285,7 +285,7 @@ HealthCheckClient::CallState::CallState(
       payload_(context_) {}
 
 HealthCheckClient::CallState::~CallState() {
-  if (grpc_health_check_client_trace.enabled()) {
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_health_check_client_trace)) {
     gpr_log(GPR_INFO, "HealthCheckClient %p: destroying CallState %p",
             health_check_client_.get(), this);
   }
@@ -589,7 +589,7 @@ void HealthCheckClient::CallState::RecvTrailingMetadataReady(
     status = grpc_get_status_code_from_metadata(
         self->recv_trailing_metadata_.idx.named.grpc_status->md);
   }
-  if (grpc_health_check_client_trace.enabled()) {
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_health_check_client_trace)) {
     gpr_log(GPR_INFO,
             "HealthCheckClient %p CallState %p: health watch failed with "
             "status %d",
34 changes: 17 additions & 17 deletions src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
@@ -640,7 +640,7 @@ void GrpcLb::Helper::UpdateState(grpc_connectivity_state state,
   // If this request is from the pending child policy, ignore it until
   // it reports READY, at which point we swap it into place.
   if (CalledByPendingChild()) {
-    if (grpc_lb_glb_trace.enabled()) {
+    if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
      gpr_log(GPR_INFO,
              "[grpclb %p helper %p] pending child policy %p reports state=%s",
              parent_.get(), this, parent_->pending_child_policy_.get(),
@@ -682,7 +682,7 @@ void GrpcLb::Helper::UpdateState(grpc_connectivity_state state,
   if (parent_->serverlist_ == nullptr ||
       (!parent_->serverlist_->ContainsAllDropEntries() &&
        state != GRPC_CHANNEL_READY)) {
-    if (grpc_lb_glb_trace.enabled()) {
+    if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
      gpr_log(GPR_INFO,
              "[grpclb %p helper %p] state=%s passing child picker %p as-is",
              parent_.get(), this, grpc_connectivity_state_name(state),
@@ -692,7 +692,7 @@ void GrpcLb::Helper::UpdateState(grpc_connectivity_state state,
     return;
   }
   // Cases 2 and 3a: wrap picker from the child in our own picker.
-  if (grpc_lb_glb_trace.enabled()) {
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
    gpr_log(GPR_INFO, "[grpclb %p helper %p] state=%s wrapping child picker %p",
            parent_.get(), this, grpc_connectivity_state_name(state),
            picker.get());
@@ -715,7 +715,7 @@ void GrpcLb::Helper::RequestReresolution() {
           ? parent_->pending_child_policy_.get()
           : parent_->child_policy_.get();
   if (child_ != latest_child_policy) return;
-  if (grpc_lb_glb_trace.enabled()) {
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
    gpr_log(GPR_INFO,
            "[grpclb %p] Re-resolution requested from %schild policy (%p).",
            parent_.get(), CalledByPendingChild() ? "pending " : "", child_);
@@ -802,7 +802,7 @@ void GrpcLb::BalancerCallState::Orphan() {
 
 void GrpcLb::BalancerCallState::StartQuery() {
   GPR_ASSERT(lb_call_ != nullptr);
-  if (grpc_lb_glb_trace.enabled()) {
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
    gpr_log(GPR_INFO, "[grpclb %p] lb_calld=%p: Starting LB call %p",
            grpclb_policy_.get(), this, lb_call_);
   }
@@ -1009,15 +1009,15 @@ void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked(
       lb_calld->client_stats_report_interval_ = GPR_MAX(
           GPR_MS_PER_SEC, grpc_grpclb_duration_to_millis(
                               &initial_response->client_stats_report_interval));
-      if (grpc_lb_glb_trace.enabled()) {
+      if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
        gpr_log(GPR_INFO,
                "[grpclb %p] lb_calld=%p: Received initial LB response "
                "message; client load reporting interval = %" PRId64
                " milliseconds",
                grpclb_policy, lb_calld,
                lb_calld->client_stats_report_interval_);
       }
-    } else if (grpc_lb_glb_trace.enabled()) {
+    } else if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
      gpr_log(GPR_INFO,
              "[grpclb %p] lb_calld=%p: Received initial LB response message; "
              "client load reporting NOT enabled",
@@ -1030,7 +1030,7 @@ void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked(
     // Have seen initial response, look for serverlist.
     GPR_ASSERT(lb_calld->lb_call_ != nullptr);
     auto serverlist_wrapper = MakeRefCounted<Serverlist>(serverlist);
-    if (grpc_lb_glb_trace.enabled()) {
+    if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
      UniquePtr<char> serverlist_text = serverlist_wrapper->AsText();
      gpr_log(GPR_INFO,
              "[grpclb %p] lb_calld=%p: Serverlist with %" PRIuPTR
@@ -1051,7 +1051,7 @@ void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked(
     // Check if the serverlist differs from the previous one.
     if (grpclb_policy->serverlist_ != nullptr &&
         *grpclb_policy->serverlist_ == *serverlist_wrapper) {
-      if (grpc_lb_glb_trace.enabled()) {
+      if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
        gpr_log(GPR_INFO,
                "[grpclb %p] lb_calld=%p: Incoming server list identical to "
                "current, ignoring.",
@@ -1129,7 +1129,7 @@ void GrpcLb::BalancerCallState::OnBalancerStatusReceivedLocked(
   BalancerCallState* lb_calld = static_cast<BalancerCallState*>(arg);
   GrpcLb* grpclb_policy = lb_calld->grpclb_policy();
   GPR_ASSERT(lb_calld->lb_call_ != nullptr);
-  if (grpc_lb_glb_trace.enabled()) {
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
    char* status_details =
        grpc_slice_to_c_string(lb_calld->lb_call_status_details_);
    gpr_log(GPR_INFO,
@@ -1291,7 +1291,7 @@ GrpcLb::GrpcLb(Args args)
   grpc_uri* uri = grpc_uri_parse(server_uri, true);
   GPR_ASSERT(uri->path[0] != '\0');
   server_name_ = gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
-  if (grpc_lb_glb_trace.enabled()) {
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
    gpr_log(GPR_INFO,
            "[grpclb %p] Will use '%s' as the server name for LB request.",
            this, server_name_);
@@ -1535,7 +1535,7 @@ void GrpcLb::StartBalancerCallLocked() {
   // Init the LB call data.
   GPR_ASSERT(lb_calld_ == nullptr);
   lb_calld_ = MakeOrphanable<BalancerCallState>(Ref());
-  if (grpc_lb_glb_trace.enabled()) {
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
    gpr_log(GPR_INFO,
            "[grpclb %p] Query for backends (lb_channel: %p, lb_calld: %p)",
            this, lb_channel_, lb_calld_.get());
@@ -1545,7 +1545,7 @@
 
 void GrpcLb::StartBalancerCallRetryTimerLocked() {
   grpc_millis next_try = lb_call_backoff_.NextAttemptTime();
-  if (grpc_lb_glb_trace.enabled()) {
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
    gpr_log(GPR_INFO, "[grpclb %p] Connection to LB server lost...", this);
    grpc_millis timeout = next_try - ExecCtx::Get()->Now();
    if (timeout > 0) {
@@ -1572,7 +1572,7 @@ void GrpcLb::OnBalancerCallRetryTimerLocked(void* arg, grpc_error* error) {
   grpclb_policy->retry_timer_callback_pending_ = false;
   if (!grpclb_policy->shutting_down_ && error == GRPC_ERROR_NONE &&
       grpclb_policy->lb_calld_ == nullptr) {
-    if (grpc_lb_glb_trace.enabled()) {
+    if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
      gpr_log(GPR_INFO, "[grpclb %p] Restarting call to LB server",
              grpclb_policy);
     }
@@ -1656,7 +1656,7 @@ OrphanablePtr<LoadBalancingPolicy> GrpcLb::CreateChildPolicyLocked(
     return nullptr;
   }
   helper->set_child(lb_policy.get());
-  if (grpc_lb_glb_trace.enabled()) {
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
    gpr_log(GPR_INFO, "[grpclb %p] Created new child policy %s (%p)", this,
            name, lb_policy.get());
   }
@@ -1755,7 +1755,7 @@ void GrpcLb::CreateOrUpdateChildPolicyLocked() {
   // Cases 1, 2b, and 3b: create a new child policy.
   // If child_policy_ is null, we set it (case 1), else we set
   // pending_child_policy_ (cases 2b and 3b).
-  if (grpc_lb_glb_trace.enabled()) {
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
    gpr_log(GPR_INFO, "[grpclb %p] Creating new %schild policy %s", this,
            child_policy_ == nullptr ? "" : "pending ", child_policy_name);
   }
@@ -1779,7 +1779,7 @@
   }
   GPR_ASSERT(policy_to_update != nullptr);
   // Update the policy.
-  if (grpc_lb_glb_trace.enabled()) {
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
    gpr_log(GPR_INFO, "[grpclb %p] Updating %schild policy %p", this,
            policy_to_update == pending_child_policy_.get() ? "pending " : "",
            policy_to_update);
@@ -160,13 +160,13 @@ class PickFirst : public LoadBalancingPolicy {
 };
 
 PickFirst::PickFirst(Args args) : LoadBalancingPolicy(std::move(args)) {
-  if (grpc_lb_pick_first_trace.enabled()) {
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_pick_first_trace)) {
     gpr_log(GPR_INFO, "Pick First %p created.", this);
   }
 }
 
 PickFirst::~PickFirst() {
-  if (grpc_lb_pick_first_trace.enabled()) {
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_pick_first_trace)) {
     gpr_log(GPR_INFO, "Destroying Pick First %p", this);
   }
   GPR_ASSERT(subchannel_list_ == nullptr);
@@ -175,7 +175,7 @@ PickFirst::~PickFirst() {
 
 void PickFirst::ShutdownLocked() {
   AutoChildRefsUpdater guard(this);
-  if (grpc_lb_pick_first_trace.enabled()) {
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_pick_first_trace)) {
     gpr_log(GPR_INFO, "Pick First %p Shutting down", this);
   }
   shutdown_ = true;
@@ -245,7 +245,7 @@ void PickFirst::UpdateChildRefsLocked() {
 
 void PickFirst::UpdateLocked(UpdateArgs args) {
   AutoChildRefsUpdater guard(this);
-  if (grpc_lb_pick_first_trace.enabled()) {
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_pick_first_trace)) {
    gpr_log(GPR_INFO,
            "Pick First %p received update with %" PRIuPTR " addresses", this,
            args.addresses.size());
@@ -317,7 +317,7 @@ void PickFirst::UpdateLocked(UpdateArgs args) {
     // We do have a selected subchannel (which means it's READY), so keep
     // using it until one of the subchannels in the new list reports READY.
     if (latest_pending_subchannel_list_ != nullptr) {
-      if (grpc_lb_pick_first_trace.enabled()) {
+      if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_pick_first_trace)) {
        gpr_log(GPR_INFO,
                "Pick First %p Shutting down latest pending subchannel list "
                "%p, about to be replaced by newer latest %p",
@@ -349,7 +349,7 @@ void PickFirst::PickFirstSubchannelData::ProcessConnectivityChangeLocked(
   GPR_ASSERT(connectivity_state != GRPC_CHANNEL_SHUTDOWN);
   // Handle updates for the currently selected subchannel.
   if (p->selected_ == this) {
-    if (grpc_lb_pick_first_trace.enabled()) {
+    if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_pick_first_trace)) {
      gpr_log(GPR_INFO,
              "Pick First %p selected subchannel connectivity changed to %s", p,
              grpc_connectivity_state_name(connectivity_state));
@@ -358,7 +358,7 @@ void PickFirst::PickFirstSubchannelData::ProcessConnectivityChangeLocked(
     // pending update, switch to the pending update.
     if (connectivity_state != GRPC_CHANNEL_READY &&
         p->latest_pending_subchannel_list_ != nullptr) {
-      if (grpc_lb_pick_first_trace.enabled()) {
+      if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_pick_first_trace)) {
        gpr_log(GPR_INFO,
                "Pick First %p promoting pending subchannel list %p to "
                "replace %p",
@@ -492,7 +492,7 @@ void PickFirst::PickFirstSubchannelData::ProcessUnselectedReadyLocked() {
              subchannel_list() == p->latest_pending_subchannel_list_.get());
   // Case 2. Promote p->latest_pending_subchannel_list_ to p->subchannel_list_.
   if (subchannel_list() == p->latest_pending_subchannel_list_.get()) {
-    if (grpc_lb_pick_first_trace.enabled()) {
+    if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_pick_first_trace)) {
      gpr_log(GPR_INFO,
              "Pick First %p promoting pending subchannel list %p to "
              "replace %p",
@@ -506,7 +506,7 @@
   p->channel_control_helper()->UpdateState(
       GRPC_CHANNEL_READY,
       UniquePtr<SubchannelPicker>(New<Picker>(connected_subchannel()->Ref())));
-  if (grpc_lb_pick_first_trace.enabled()) {
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_pick_first_trace)) {
     gpr_log(GPR_INFO, "Pick First %p selected subchannel %p", p, subchannel());
   }
 }