Updating APM unittests on the echo metrics.
There have been a series of changes in the calculation of the echo metrics. These changes made the existing unittests loose, e.g., EXPECT_EQ became EXPECT_NEAR. It is a good time to protect the echo calculation more strictly.

This change does not simply generate a new reference file and turn EXPECT_NEAR back into EXPECT_EQ; it strengthens the test as well. The main changes are:

1. The old test only sampled the metrics once, at the end of processing, while the new test samples them during the call at a fixed time interval. This gives much stronger protection.

2. Added protection for a newly introduced metric, divergent_filter_fraction.

3. As noted, use EXPECT_EQ (actually ASSERT_EQ) instead of EXPECT_NEAR as much as possible, even for floating-point values. This may be too restrictive, but it is good to be strict at the beginning. A minimal sketch of this checking pattern follows the list.
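As a rough illustration of points 1 and 3, the googletest sketch below checks a metric against stored reference values at a fixed frame interval using exact comparisons. ComputeMetric(), kReference, and the frame counts are hypothetical stand-ins; the real test reads its references from the protobuf file changed in the diff below.

// Sketch only: mirrors the interval-based, exact-match checking pattern
// described above. ComputeMetric() and kReference are hypothetical stand-ins
// for the real APM metrics and the protobuf reference data.
#include <cstddef>
#include <vector>

#include "gtest/gtest.h"

namespace {

// Stand-in for a metric that evolves as frames are processed.
float ComputeMetric(int frame_count) {
  return 0.25f * static_cast<float>(frame_count % 4);
}

TEST(IntervalMetricsSketch, ExactMatchEveryAggregationPeriod) {
  const int kNumFrames = 500;
  const int kStatsAggregationFrameNum = 100;  // 1 second at 100 frames per second.

  // One reference value per aggregation period (5 periods in 500 frames).
  const std::vector<float> kReference = {0.0f, 0.0f, 0.0f, 0.0f, 0.0f};

  size_t stats_index = 0;
  for (int frame_count = 1; frame_count <= kNumFrames; ++frame_count) {
    // Per-frame processing would happen here.
    if (frame_count % kStatsAggregationFrameNum == 0) {
      ASSERT_LT(stats_index, kReference.size());
      // Exact comparison, even for a floating-point metric.
      EXPECT_EQ(kReference[stats_index], ComputeMetric(frame_count));
      ++stats_index;
    }
  }
  // Every stored reference value must have been consumed.
  EXPECT_EQ(kReference.size(), stats_index);
}

}  // namespace

The trade-off is deliberate: an exact comparison flags any change in the metric computation, at the cost of regenerating the reference data whenever the algorithm legitimately changes.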

BUG=

Review-Url: https://codereview.webrtc.org/1969403003
Cr-Commit-Position: refs/heads/master@{#12871}
minyuel authored and Commit bot committed May 24, 2016
1 parent d36df89 commit 58530ed
Showing 4 changed files with 81 additions and 65 deletions.
Binary file modified data/audio_processing/output_data_float.pb
Binary file modified data/audio_processing/output_data_mac.pb
139 changes: 77 additions & 62 deletions webrtc/modules/audio_processing/audio_processing_unittest.cc
@@ -204,10 +204,10 @@ int16_t MaxAudioFrame(const AudioFrame& frame) {
#if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
void TestStats(const AudioProcessing::Statistic& test,
const audioproc::Test::Statistic& reference) {
EXPECT_NEAR(reference.instant(), test.instant, 2);
EXPECT_NEAR(reference.average(), test.average, 2);
EXPECT_NEAR(reference.maximum(), test.maximum, 3);
EXPECT_NEAR(reference.minimum(), test.minimum, 2);
EXPECT_EQ(reference.instant(), test.instant);
EXPECT_EQ(reference.average(), test.average);
EXPECT_EQ(reference.maximum(), test.maximum);
EXPECT_EQ(reference.minimum(), test.minimum);
}

void WriteStatsMessage(const AudioProcessing::Statistic& output,
@@ -221,7 +221,6 @@ void WriteStatsMessage(const AudioProcessing::Statistic& output,

void OpenFileAndWriteMessage(const std::string filename,
const ::google::protobuf::MessageLite& msg) {
#if defined(WEBRTC_LINUX) && !defined(WEBRTC_ANDROID)
FILE* file = fopen(filename.c_str(), "wb");
ASSERT_TRUE(file != NULL);

@@ -234,10 +233,6 @@ void OpenFileAndWriteMessage,
ASSERT_EQ(static_cast<size_t>(size),
fwrite(array.get(), sizeof(array[0]), size, file));
fclose(file);
#else
std::cout << "Warning: Writing new reference is only allowed on Linux!"
<< std::endl;
#endif
}

std::string ResourceFilePath(std::string name, int sample_rate_hz) {
@@ -2101,6 +2096,9 @@ TEST_F(ApmTest, Process) {
int analog_level_average = 0;
int max_output_average = 0;
float ns_speech_prob_average = 0.0f;
#if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
int stats_index = 0;
#endif

while (ReadFrame(far_file_, revframe_) && ReadFrame(near_file_, frame_)) {
EXPECT_EQ(apm_->kNoError, apm_->ProcessReverseStream(revframe_));
@@ -2148,27 +2146,81 @@ TEST_F(ApmTest, Process) {
// Reset in case of downmixing.
frame_->num_channels_ = static_cast<size_t>(test->num_input_channels());
frame_count++;

#if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
const int kStatsAggregationFrameNum = 100; // 1 second.
if (frame_count % kStatsAggregationFrameNum == 0) {
// Get echo metrics.
EchoCancellation::Metrics echo_metrics;
EXPECT_EQ(apm_->kNoError,
apm_->echo_cancellation()->GetMetrics(&echo_metrics));

// Get delay metrics.
int median = 0;
int std = 0;
float fraction_poor_delays = 0;
EXPECT_EQ(apm_->kNoError,
apm_->echo_cancellation()->GetDelayMetrics(
&median, &std, &fraction_poor_delays));

// Get RMS.
int rms_level = apm_->level_estimator()->RMS();
EXPECT_LE(0, rms_level);
EXPECT_GE(127, rms_level);

if (!write_ref_data) {
const audioproc::Test::EchoMetrics& reference =
test->echo_metrics(stats_index);
TestStats(echo_metrics.residual_echo_return_loss,
reference.residual_echo_return_loss());
TestStats(echo_metrics.echo_return_loss,
reference.echo_return_loss());
TestStats(echo_metrics.echo_return_loss_enhancement,
reference.echo_return_loss_enhancement());
TestStats(echo_metrics.a_nlp,
reference.a_nlp());
EXPECT_EQ(echo_metrics.divergent_filter_fraction,
reference.divergent_filter_fraction());

const audioproc::Test::DelayMetrics& reference_delay =
test->delay_metrics(stats_index);
EXPECT_EQ(reference_delay.median(), median);
EXPECT_EQ(reference_delay.std(), std);
EXPECT_EQ(reference_delay.fraction_poor_delays(),
fraction_poor_delays);

EXPECT_EQ(test->rms_level(stats_index), rms_level);

++stats_index;
} else {
audioproc::Test::EchoMetrics* message =
test->add_echo_metrics();
WriteStatsMessage(echo_metrics.residual_echo_return_loss,
message->mutable_residual_echo_return_loss());
WriteStatsMessage(echo_metrics.echo_return_loss,
message->mutable_echo_return_loss());
WriteStatsMessage(echo_metrics.echo_return_loss_enhancement,
message->mutable_echo_return_loss_enhancement());
WriteStatsMessage(echo_metrics.a_nlp,
message->mutable_a_nlp());
message->set_divergent_filter_fraction(
echo_metrics.divergent_filter_fraction);

audioproc::Test::DelayMetrics* message_delay =
test->add_delay_metrics();
message_delay->set_median(median);
message_delay->set_std(std);
message_delay->set_fraction_poor_delays(fraction_poor_delays);

test->add_rms_level(rms_level);
}
}
#endif // defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE).
}
max_output_average /= frame_count;
analog_level_average /= frame_count;
ns_speech_prob_average /= frame_count;

#if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
EchoCancellation::Metrics echo_metrics;
EXPECT_EQ(apm_->kNoError,
apm_->echo_cancellation()->GetMetrics(&echo_metrics));
int median = 0;
int std = 0;
float fraction_poor_delays = 0;
EXPECT_EQ(apm_->kNoError,
apm_->echo_cancellation()->GetDelayMetrics(
&median, &std, &fraction_poor_delays));

int rms_level = apm_->level_estimator()->RMS();
EXPECT_LE(0, rms_level);
EXPECT_GE(127, rms_level);
#endif

if (!write_ref_data) {
const int kIntNear = 1;
// When running the test on a N7 we get a {2, 6} difference of
@@ -2198,27 +2250,8 @@ TEST_F(ApmTest, Process) {
EXPECT_NEAR(test->max_output_average(),
max_output_average - kMaxOutputAverageOffset,
kMaxOutputAverageNear);

#if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
audioproc::Test::EchoMetrics reference = test->echo_metrics();
TestStats(echo_metrics.residual_echo_return_loss,
reference.residual_echo_return_loss());
TestStats(echo_metrics.echo_return_loss,
reference.echo_return_loss());
TestStats(echo_metrics.echo_return_loss_enhancement,
reference.echo_return_loss_enhancement());
TestStats(echo_metrics.a_nlp,
reference.a_nlp());

const double kFloatNear = 0.0005;
audioproc::Test::DelayMetrics reference_delay = test->delay_metrics();
EXPECT_NEAR(reference_delay.median(), median, kIntNear);
EXPECT_NEAR(reference_delay.std(), std, kIntNear);
EXPECT_NEAR(reference_delay.fraction_poor_delays(), fraction_poor_delays,
kFloatNear);

EXPECT_NEAR(test->rms_level(), rms_level, kIntNear);

EXPECT_NEAR(test->ns_speech_probability_average(),
ns_speech_prob_average,
kFloatNear);
@@ -2232,24 +2265,6 @@ TEST_F(ApmTest, Process) {
test->set_max_output_average(max_output_average);

#if defined(WEBRTC_AUDIOPROC_FLOAT_PROFILE)
audioproc::Test::EchoMetrics* message = test->mutable_echo_metrics();
WriteStatsMessage(echo_metrics.residual_echo_return_loss,
message->mutable_residual_echo_return_loss());
WriteStatsMessage(echo_metrics.echo_return_loss,
message->mutable_echo_return_loss());
WriteStatsMessage(echo_metrics.echo_return_loss_enhancement,
message->mutable_echo_return_loss_enhancement());
WriteStatsMessage(echo_metrics.a_nlp,
message->mutable_a_nlp());

audioproc::Test::DelayMetrics* message_delay =
test->mutable_delay_metrics();
message_delay->set_median(median);
message_delay->set_std(std);
message_delay->set_fraction_poor_delays(fraction_poor_delays);

test->set_rms_level(rms_level);

EXPECT_LE(0.0f, ns_speech_prob_average);
EXPECT_GE(1.0f, ns_speech_prob_average);
test->set_ns_speech_probability_average(ns_speech_prob_average);
7 changes: 4 additions & 3 deletions webrtc/modules/audio_processing/test/unittest.proto
@@ -32,19 +32,20 @@ message Test {
optional Statistic echo_return_loss = 2;
optional Statistic echo_return_loss_enhancement = 3;
optional Statistic a_nlp = 4;
optional float divergent_filter_fraction = 5;
}

optional EchoMetrics echo_metrics = 11;
repeated EchoMetrics echo_metrics = 11;

message DelayMetrics {
optional int32 median = 1;
optional int32 std = 2;
optional float fraction_poor_delays = 3;
}

optional DelayMetrics delay_metrics = 12;
repeated DelayMetrics delay_metrics = 12;

optional int32 rms_level = 13;
repeated int32 rms_level = 13;

optional float ns_speech_probability_average = 14;

