Diffstat (limited to 'chromium/third_party/webrtc/modules/audio_processing')
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/OWNERS | 6
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/aec/Android.mk | 9
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/aec/aec_core.c | 102
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/aec/aec_core.h | 31
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/aec/aec_core_internal.h | 19
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/aec/aec_core_mips.c | 774
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/aec/aec_core_neon.c | 304
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/aec/aec_rdft.c | 15
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/aec/aec_rdft.h | 21
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/aec/aec_rdft_mips.c | 1213
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/aec/echo_cancellation.c | 58
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/aec/include/echo_cancellation.h | 40
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/aec/system_delay_unittest.cc | 21
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/aecm/aecm_core.c | 3
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/aecm/aecm_core_c.c | 2
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/aecm/echo_control_mobile.c | 29
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/aecm/include/echo_control_mobile.h | 44
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/agc/analog_agc.c | 16
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/agc/analog_agc.h | 1
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/agc/digital_agc.c | 11
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/audio_buffer.cc | 496
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/audio_buffer.h | 117
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/audio_processing.gypi | 50
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/audio_processing_impl.cc | 646
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/audio_processing_impl.h | 132
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/audio_processing_impl_unittest.cc | 74
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/audio_processing_tests.gypi | 28
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/common.h | 76
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/debug.proto | 19
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/echo_cancellation_impl.cc | 70
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/echo_cancellation_impl.h | 25
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/echo_cancellation_impl_unittest.cc | 31
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/echo_cancellation_impl_wrapper.h | 35
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/echo_control_mobile_impl.cc | 27
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/echo_control_mobile_impl.h | 11
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/gain_control_impl.cc | 43
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/gain_control_impl.h | 11
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/gen_core_neon_offsets.gyp | 45
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/gen_core_neon_offsets_chromium.gyp | 45
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/high_pass_filter_impl.cc | 20
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/high_pass_filter_impl.h | 10
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/include/audio_processing.h | 164
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/include/mock_audio_processing.h | 54
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/level_estimator_impl.cc | 132
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/level_estimator_impl.h | 12
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/lib_core_neon_offsets.gypi | 51
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/noise_suppression_impl.cc | 35
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/noise_suppression_impl.h | 12
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/ns/include/noise_suppression.h | 8
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/ns/noise_suppression.c | 4
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/ns/ns_core.c | 78
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/ns/ns_core.h | 8
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core.c | 279
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core.h | 37
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core_c.c | 273
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core_mips.c | 1008
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/processing_component.cc | 27
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/processing_component.h | 8
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/rms_level.cc | 61
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/rms_level.h | 57
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/splitting_filter.cc | 33
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/splitting_filter.h | 63
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/typing_detection.cc | 90
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/typing_detection.h | 93
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/utility/delay_estimator.c | 106
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/utility/delay_estimator.h | 74
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/utility/delay_estimator_unittest.cc | 143
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/utility/delay_estimator_wrapper.c | 67
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/utility/delay_estimator_wrapper.h | 87
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/utility/ring_buffer_unittest.cc | 11
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/voice_detection_impl.cc | 27
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/voice_detection_impl.h | 10
72 files changed, 6237 insertions(+), 1605 deletions(-)
diff --git a/chromium/third_party/webrtc/modules/audio_processing/OWNERS b/chromium/third_party/webrtc/modules/audio_processing/OWNERS
index 5a2563444b6..7c1f7881c2c 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/OWNERS
+++ b/chromium/third_party/webrtc/modules/audio_processing/OWNERS
@@ -1,2 +1,8 @@
+aluebs@webrtc.org
andrew@webrtc.org
bjornv@webrtc.org
+
+# These are for the common case of adding or renaming files. If you're doing
+# structural changes, please get a review from a reviewer in this file.
+per-file *.gyp=*
+per-file *.gypi=*
diff --git a/chromium/third_party/webrtc/modules/audio_processing/aec/Android.mk b/chromium/third_party/webrtc/modules/audio_processing/aec/Android.mk
index 3ad52b96625..181e87d9a76 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/aec/Android.mk
+++ b/chromium/third_party/webrtc/modules/audio_processing/aec/Android.mk
@@ -47,3 +47,12 @@ ifndef NDK_ROOT
include external/stlport/libstlport.mk
endif
include $(BUILD_STATIC_LIBRARY)
+
+#########################
+# Build the neon library.
+ifeq ($(WEBRTC_BUILD_NEON_LIBS),true)
+
+LOCAL_SRC_FILES += \
+ aec_core_neon.c
+
+endif # ifeq ($(WEBRTC_BUILD_NEON_LIBS),true)
diff --git a/chromium/third_party/webrtc/modules/audio_processing/aec/aec_core.c b/chromium/third_party/webrtc/modules/audio_processing/aec/aec_core.c
index bfa087c23de..207c6dc3bfc 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/aec/aec_core.c
+++ b/chromium/third_party/webrtc/modules/audio_processing/aec/aec_core.c
@@ -67,7 +67,7 @@ static const float sqrtHanning[65] = {
// Matlab code to produce table:
// weightCurve = [0 ; 0.3 * sqrt(linspace(0,1,64))' + 0.1];
// fprintf(1, '\t%.4f, %.4f, %.4f, %.4f, %.4f, %.4f,\n', weightCurve);
-const float WebRtcAec_weightCurve[65] = {
+ALIGN16_BEG const float ALIGN16_END WebRtcAec_weightCurve[65] = {
0.0000f, 0.1000f, 0.1378f, 0.1535f, 0.1655f, 0.1756f, 0.1845f, 0.1926f,
0.2000f, 0.2069f, 0.2134f, 0.2195f, 0.2254f, 0.2309f, 0.2363f, 0.2414f,
0.2464f, 0.2512f, 0.2558f, 0.2604f, 0.2648f, 0.2690f, 0.2732f, 0.2773f,
@@ -81,7 +81,7 @@ const float WebRtcAec_weightCurve[65] = {
// Matlab code to produce table:
// overDriveCurve = [sqrt(linspace(0,1,65))' + 1];
// fprintf(1, '\t%.4f, %.4f, %.4f, %.4f, %.4f, %.4f,\n', overDriveCurve);
-const float WebRtcAec_overDriveCurve[65] = {
+ALIGN16_BEG const float ALIGN16_END WebRtcAec_overDriveCurve[65] = {
1.0000f, 1.1250f, 1.1768f, 1.2165f, 1.2500f, 1.2795f, 1.3062f, 1.3307f,
1.3536f, 1.3750f, 1.3953f, 1.4146f, 1.4330f, 1.4507f, 1.4677f, 1.4841f,
1.5000f, 1.5154f, 1.5303f, 1.5449f, 1.5590f, 1.5728f, 1.5863f, 1.5995f,
@@ -116,7 +116,7 @@ extern int webrtc_aec_instance_count;
// "Private" function prototypes.
static void ProcessBlock(AecCore* aec);
-static void NonLinearProcessing(AecCore* aec, short* output, short* outputH);
+static void NonLinearProcessing(AecCore* aec, float* output, float* outputH);
static void GetHighbandGain(const float* lambda, float* nlpGainHband);
@@ -160,28 +160,28 @@ int WebRtcAec_CreateAec(AecCore** aecInst) {
return -1;
}
- aec->nearFrBuf = WebRtc_CreateBuffer(FRAME_LEN + PART_LEN, sizeof(int16_t));
+ aec->nearFrBuf = WebRtc_CreateBuffer(FRAME_LEN + PART_LEN, sizeof(float));
if (!aec->nearFrBuf) {
WebRtcAec_FreeAec(aec);
aec = NULL;
return -1;
}
- aec->outFrBuf = WebRtc_CreateBuffer(FRAME_LEN + PART_LEN, sizeof(int16_t));
+ aec->outFrBuf = WebRtc_CreateBuffer(FRAME_LEN + PART_LEN, sizeof(float));
if (!aec->outFrBuf) {
WebRtcAec_FreeAec(aec);
aec = NULL;
return -1;
}
- aec->nearFrBufH = WebRtc_CreateBuffer(FRAME_LEN + PART_LEN, sizeof(int16_t));
+ aec->nearFrBufH = WebRtc_CreateBuffer(FRAME_LEN + PART_LEN, sizeof(float));
if (!aec->nearFrBufH) {
WebRtcAec_FreeAec(aec);
aec = NULL;
return -1;
}
- aec->outFrBufH = WebRtc_CreateBuffer(FRAME_LEN + PART_LEN, sizeof(int16_t));
+ aec->outFrBufH = WebRtc_CreateBuffer(FRAME_LEN + PART_LEN, sizeof(float));
if (!aec->outFrBufH) {
WebRtcAec_FreeAec(aec);
aec = NULL;
@@ -419,6 +419,7 @@ WebRtcAec_FilterFar_t WebRtcAec_FilterFar;
WebRtcAec_ScaleErrorSignal_t WebRtcAec_ScaleErrorSignal;
WebRtcAec_FilterAdaptation_t WebRtcAec_FilterAdaptation;
WebRtcAec_OverdriveAndSuppress_t WebRtcAec_OverdriveAndSuppress;
+WebRtcAec_ComfortNoise_t WebRtcAec_ComfortNoise;
int WebRtcAec_InitAec(AecCore* aec, int sampFreq) {
int i;
@@ -472,9 +473,21 @@ int WebRtcAec_InitAec(AecCore* aec, int sampFreq) {
aec->delay_logging_enabled = 0;
memset(aec->delay_histogram, 0, sizeof(aec->delay_histogram));
+ aec->reported_delay_enabled = 1;
aec->extended_filter_enabled = 0;
aec->num_partitions = kNormalNumPartitions;
+ // Update the delay estimator with filter length. We use half the
+ // |num_partitions| to take the echo path into account. In practice we say
+ // that the echo has a duration of maximum half |num_partitions|, which is not
+ // true, but serves as a crude measure.
+ WebRtc_set_allowed_offset(aec->delay_estimator, aec->num_partitions / 2);
+ // TODO(bjornv): I currently hard coded the enable. Once we've established
+ // that AECM has no performance regression, robust_validation will be enabled
+ // all the time and the APIs to turn it on/off will be removed. Hence, remove
+ // this line then.
+ WebRtc_enable_robust_validation(aec->delay_estimator, 1);
+
// Default target suppression mode.
aec->nlp_mode = 1;
@@ -557,6 +570,7 @@ int WebRtcAec_InitAec(AecCore* aec, int sampFreq) {
WebRtcAec_ScaleErrorSignal = ScaleErrorSignal;
WebRtcAec_FilterAdaptation = FilterAdaptation;
WebRtcAec_OverdriveAndSuppress = OverdriveAndSuppress;
+ WebRtcAec_ComfortNoise = ComfortNoise;
#if defined(WEBRTC_ARCH_X86_FAMILY)
if (WebRtc_GetCPUInfo(kSSE2)) {
@@ -564,6 +578,14 @@ int WebRtcAec_InitAec(AecCore* aec, int sampFreq) {
}
#endif
+#if defined(MIPS_FPU_LE)
+ WebRtcAec_InitAec_mips();
+#endif
+
+#if defined(WEBRTC_DETECT_ARM_NEON) || defined(WEBRTC_ARCH_ARM_NEON)
+ WebRtcAec_InitAec_neon();
+#endif
+
aec_rdft_init();
return 0;
@@ -599,11 +621,11 @@ int WebRtcAec_MoveFarReadPtr(AecCore* aec, int elements) {
}
void WebRtcAec_ProcessFrame(AecCore* aec,
- const short* nearend,
- const short* nearendH,
+ const float* nearend,
+ const float* nearendH,
int knownDelay,
- int16_t* out,
- int16_t* outH) {
+ float* out,
+ float* outH) {
int out_elements = 0;
// For each frame the process is as follows:
@@ -724,7 +746,7 @@ int WebRtcAec_GetDelayMetricsCore(AecCore* self, int* median, int* std) {
// Calculate the L1 norm, with median value as central moment.
for (i = 0; i < kHistorySizeBlocks; i++) {
- l1_norm += (float)(fabs(i - my_median) * self->delay_histogram[i]);
+ l1_norm += (float)abs(i - my_median) * self->delay_histogram[i];
}
*std = (int)(l1_norm / (float)num_delay_values + 0.5f) * kMsPerBlock;
@@ -768,9 +790,19 @@ void WebRtcAec_SetConfigCore(AecCore* self,
}
}
+void WebRtcAec_enable_reported_delay(AecCore* self, int enable) {
+ self->reported_delay_enabled = enable;
+}
+
+int WebRtcAec_reported_delay_enabled(AecCore* self) {
+ return self->reported_delay_enabled;
+}
+
void WebRtcAec_enable_delay_correction(AecCore* self, int enable) {
self->extended_filter_enabled = enable;
self->num_partitions = enable ? kExtendedNumPartitions : kNormalNumPartitions;
+ // Update the delay estimator with filter length. See InitAEC() for details.
+ WebRtc_set_allowed_offset(self->delay_estimator, self->num_partitions / 2);
}
int WebRtcAec_delay_correction_enabled(AecCore* self) {
@@ -786,7 +818,7 @@ void WebRtcAec_SetSystemDelay(AecCore* self, int delay) {
static void ProcessBlock(AecCore* aec) {
int i;
- float d[PART_LEN], y[PART_LEN], e[PART_LEN], dH[PART_LEN];
+ float y[PART_LEN], e[PART_LEN];
float scale;
float fft[PART_LEN2];
@@ -805,30 +837,22 @@ static void ProcessBlock(AecCore* aec) {
const float ramp = 1.0002f;
const float gInitNoise[2] = {0.999f, 0.001f};
- int16_t nearend[PART_LEN];
- int16_t* nearend_ptr = NULL;
- int16_t output[PART_LEN];
- int16_t outputH[PART_LEN];
+ float nearend[PART_LEN];
+ float* nearend_ptr = NULL;
+ float output[PART_LEN];
+ float outputH[PART_LEN];
float* xf_ptr = NULL;
- memset(dH, 0, sizeof(dH));
+ // Concatenate old and new nearend blocks.
if (aec->sampFreq == 32000) {
- // Get the upper band first so we can reuse |nearend|.
WebRtc_ReadBuffer(aec->nearFrBufH, (void**)&nearend_ptr, nearend, PART_LEN);
- for (i = 0; i < PART_LEN; i++) {
- dH[i] = (float)(nearend_ptr[i]);
- }
- memcpy(aec->dBufH + PART_LEN, dH, sizeof(float) * PART_LEN);
+ memcpy(aec->dBufH + PART_LEN, nearend_ptr, sizeof(nearend));
}
WebRtc_ReadBuffer(aec->nearFrBuf, (void**)&nearend_ptr, nearend, PART_LEN);
+ memcpy(aec->dBuf + PART_LEN, nearend_ptr, sizeof(nearend));
// ---------- Ooura fft ----------
- // Concatenate old and new nearend blocks.
- for (i = 0; i < PART_LEN; i++) {
- d[i] = (float)(nearend_ptr[i]);
- }
- memcpy(aec->dBuf + PART_LEN, d, sizeof(float) * PART_LEN);
#ifdef WEBRTC_AEC_DEBUG_DUMP
{
@@ -940,7 +964,7 @@ static void ProcessBlock(AecCore* aec) {
}
for (i = 0; i < PART_LEN; i++) {
- e[i] = d[i] - y[i];
+ e[i] = nearend_ptr[i] - y[i];
}
// Error fft
@@ -999,7 +1023,7 @@ static void ProcessBlock(AecCore* aec) {
#endif
}
-static void NonLinearProcessing(AecCore* aec, short* output, short* outputH) {
+static void NonLinearProcessing(AecCore* aec, float* output, float* outputH) {
float efw[2][PART_LEN1], dfw[2][PART_LEN1], xfw[2][PART_LEN1];
complex_t comfortNoiseHband[PART_LEN1];
float fft[PART_LEN2];
@@ -1266,7 +1290,7 @@ static void NonLinearProcessing(AecCore* aec, short* output, short* outputH) {
WebRtcAec_OverdriveAndSuppress(aec, hNl, hNlFb, efw);
// Add comfort noise.
- ComfortNoise(aec, efw, comfortNoiseHband, aec->noisePow, hNl);
+ WebRtcAec_ComfortNoise(aec, efw, comfortNoiseHband, aec->noisePow, hNl);
// TODO(bjornv): Investigate how to take the windowing below into account if
// needed.
@@ -1293,12 +1317,12 @@ static void NonLinearProcessing(AecCore* aec, short* output, short* outputH) {
fft[i] *= scale; // fft scaling
fft[i] = fft[i] * sqrtHanning[i] + aec->outBuf[i];
- // Saturation protection
- output[i] = (short)WEBRTC_SPL_SAT(
- WEBRTC_SPL_WORD16_MAX, fft[i], WEBRTC_SPL_WORD16_MIN);
-
fft[PART_LEN + i] *= scale; // fft scaling
aec->outBuf[i] = fft[PART_LEN + i] * sqrtHanning[PART_LEN - i];
+
+ // Saturate output to keep it in the allowed range.
+ output[i] = WEBRTC_SPL_SAT(
+ WEBRTC_SPL_WORD16_MAX, fft[i], WEBRTC_SPL_WORD16_MIN);
}
// For H band
@@ -1323,8 +1347,8 @@ static void NonLinearProcessing(AecCore* aec, short* output, short* outputH) {
// compute gain factor
for (i = 0; i < PART_LEN; i++) {
- dtmp = (float)aec->dBufH[i];
- dtmp = (float)dtmp * nlpGainHband; // for variable gain
+ dtmp = aec->dBufH[i];
+ dtmp = dtmp * nlpGainHband; // for variable gain
// add some comfort noise where Hband is attenuated
if (flagHbandCn == 1) {
@@ -1332,8 +1356,8 @@ static void NonLinearProcessing(AecCore* aec, short* output, short* outputH) {
dtmp += cnScaleHband * fft[i];
}
- // Saturation protection
- outputH[i] = (short)WEBRTC_SPL_SAT(
+ // Saturate output to keep it in the allowed range.
+ outputH[i] = WEBRTC_SPL_SAT(
WEBRTC_SPL_WORD16_MAX, dtmp, WEBRTC_SPL_WORD16_MIN);
}
}
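
[Editor's note on the hunks above: the AEC frame path moves from int16_t to float end to end, but the output is still clamped to the int16 range so downstream fixed-point consumers cannot overflow. A minimal standalone C sketch of that clamp, using a local stand-in for WEBRTC_SPL_SAT (the real macro lives in signal_processing_library.h):]

static const float kWord16Max = 32767.0f;   // WEBRTC_SPL_WORD16_MAX
static const float kWord16Min = -32768.0f;  // WEBRTC_SPL_WORD16_MIN

// Stand-in for WEBRTC_SPL_SAT(max, value, min): clamp value to [min, max].
static float SatFloat(float max, float value, float min) {
  return value > max ? max : (value < min ? min : value);
}

static void SaturateBlock(float* output, const float* in, int len) {
  int i;
  for (i = 0; i < len; i++) {
    // Samples stay float, but are bounded to the int16 range.
    output[i] = SatFloat(kWord16Max, in[i], kWord16Min);
  }
}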
diff --git a/chromium/third_party/webrtc/modules/audio_processing/aec/aec_core.h b/chromium/third_party/webrtc/modules/audio_processing/aec/aec_core.h
index d3c6d7e2b2e..93bfed46688 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/aec/aec_core.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/aec/aec_core.h
@@ -22,17 +22,6 @@
#define PART_LEN1 (PART_LEN + 1) // Unique fft coefficients
#define PART_LEN2 (PART_LEN * 2) // Length of partition * 2
-// Delay estimator constants, used for logging.
-enum {
- kMaxDelayBlocks = 60
-};
-enum {
- kLookaheadBlocks = 15
-};
-enum {
- kHistorySizeBlocks = kMaxDelayBlocks + kLookaheadBlocks
-};
-
typedef float complex_t[2];
// For performance reasons, some arrays of complex numbers are replaced by twice
// as long arrays of float, all the real parts followed by all the imaginary
@@ -65,14 +54,20 @@ int WebRtcAec_CreateAec(AecCore** aec);
int WebRtcAec_FreeAec(AecCore* aec);
int WebRtcAec_InitAec(AecCore* aec, int sampFreq);
void WebRtcAec_InitAec_SSE2(void);
+#if defined(MIPS_FPU_LE)
+void WebRtcAec_InitAec_mips(void);
+#endif
+#if defined(WEBRTC_DETECT_ARM_NEON) || defined(WEBRTC_ARCH_ARM_NEON)
+void WebRtcAec_InitAec_neon(void);
+#endif
void WebRtcAec_BufferFarendPartition(AecCore* aec, const float* farend);
void WebRtcAec_ProcessFrame(AecCore* aec,
- const short* nearend,
- const short* nearendH,
+ const float* nearend,
+ const float* nearendH,
int knownDelay,
- int16_t* out,
- int16_t* outH);
+ float* out,
+ float* outH);
// A helper function to call WebRtc_MoveReadPtr() for all far-end buffers.
// Returns the number of elements moved, and adjusts |system_delay| by the
@@ -101,6 +96,12 @@ void WebRtcAec_SetConfigCore(AecCore* self,
int metrics_mode,
int delay_logging);
+// Non-zero enables, zero disables.
+void WebRtcAec_enable_reported_delay(AecCore* self, int enable);
+
+// Returns non-zero if reported delay is enabled and zero if disabled.
+int WebRtcAec_reported_delay_enabled(AecCore* self);
+
// We now interpret delay correction to mean an extended filter length feature.
// We reuse the delay correction infrastructure to avoid changes through to
// libjingle. See details along with |DelayCorrection| in
diff --git a/chromium/third_party/webrtc/modules/audio_processing/aec/aec_core_internal.h b/chromium/third_party/webrtc/modules/audio_processing/aec/aec_core_internal.h
index 193369382ca..1c560f91c9c 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/aec/aec_core_internal.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/aec/aec_core_internal.h
@@ -26,6 +26,17 @@ enum {
};
static const int kNormalNumPartitions = 12;
+// Delay estimator constants, used for logging.
+enum {
+ kMaxDelayBlocks = 60
+};
+enum {
+ kLookaheadBlocks = 15
+};
+enum {
+ kHistorySizeBlocks = kMaxDelayBlocks + kLookaheadBlocks
+};
+
// Extended filter adaptation parameters.
// TODO(ajm): No narrowband tuning yet.
static const float kExtendedMu = 0.4f;
@@ -122,6 +133,7 @@ struct AecCore {
void* delay_estimator_farend;
void* delay_estimator;
+ int reported_delay_enabled; // 0 = disabled, otherwise enabled.
// 1 = extended filter mode enabled, 0 = disabled.
int extended_filter_enabled;
// Runtime selection of number of filter partitions.
@@ -151,4 +163,11 @@ typedef void (*WebRtcAec_OverdriveAndSuppress_t)(AecCore* aec,
float efw[2][PART_LEN1]);
extern WebRtcAec_OverdriveAndSuppress_t WebRtcAec_OverdriveAndSuppress;
+typedef void (*WebRtcAec_ComfortNoise_t)(AecCore* aec,
+ float efw[2][PART_LEN1],
+ complex_t* comfortNoiseHband,
+ const float* noisePow,
+ const float* lambda);
+extern WebRtcAec_ComfortNoise_t WebRtcAec_ComfortNoise;
+
#endif // WEBRTC_MODULES_AUDIO_PROCESSING_AEC_AEC_CORE_INTERNAL_H_
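
[Editor's note: the WebRtcAec_ComfortNoise_t typedef above extends the runtime-dispatch scheme already used for FilterFar, ScaleErrorSignal, FilterAdaptation, and OverdriveAndSuppress: a global function pointer is bound to the generic C implementation during init, and platform-specific init functions may then rebind it. A minimal C sketch of the pattern with hypothetical names (Rescale, Rescale_C, InitRescale_mips are illustrative, not from the tree):]

typedef void (*RescaleFn)(float* buf, int len);

static void Rescale_C(float* buf, int len) {
  int i;
  for (i = 0; i < len; i++)
    buf[i] *= 0.5f;  // Portable reference implementation.
}

RescaleFn Rescale;  // Bound at init time, like WebRtcAec_ComfortNoise.

#if defined(MIPS_FPU_LE)
void InitRescale_mips(void);  // Would rebind Rescale to an optimized version.
#endif

static void InitDispatch(void) {
  Rescale = Rescale_C;  // Generic default first.
#if defined(MIPS_FPU_LE)
  InitRescale_mips();   // Platform init may overwrite the pointer.
#endif
}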
diff --git a/chromium/third_party/webrtc/modules/audio_processing/aec/aec_core_mips.c b/chromium/third_party/webrtc/modules/audio_processing/aec/aec_core_mips.c
new file mode 100644
index 00000000000..d861e10f908
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_processing/aec/aec_core_mips.c
@@ -0,0 +1,774 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * The core AEC algorithm, which is presented with time-aligned signals.
+ */
+
+#include "webrtc/modules/audio_processing/aec/aec_core.h"
+
+#include <math.h>
+
+#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
+#include "webrtc/modules/audio_processing/aec/aec_core_internal.h"
+#include "webrtc/modules/audio_processing/aec/aec_rdft.h"
+
+static const int flagHbandCn = 1; // flag for adding comfort noise in H band
+extern const float WebRtcAec_weightCurve[65];
+extern const float WebRtcAec_overDriveCurve[65];
+
+void WebRtcAec_ComfortNoise_mips(AecCore* aec,
+ float efw[2][PART_LEN1],
+ complex_t* comfortNoiseHband,
+ const float* noisePow,
+ const float* lambda) {
+ int i, num;
+ float rand[PART_LEN];
+ float noise, noiseAvg, tmp, tmpAvg;
+ int16_t randW16[PART_LEN];
+ complex_t u[PART_LEN1];
+
+ const float pi2 = 6.28318530717959f;
+ const float pi2t = pi2 / 32768;
+
+ // Generate a uniform random array on [0 1]
+ WebRtcSpl_RandUArray(randW16, PART_LEN, &aec->seed);
+
+ int16_t *randWptr = randW16;
+ float randTemp, randTemp2, randTemp3, randTemp4;
+ short tmp1s, tmp2s, tmp3s, tmp4s;
+
+ for (i = 0; i < PART_LEN; i+=4) {
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "lh %[tmp1s], 0(%[randWptr]) \n\t"
+ "lh %[tmp2s], 2(%[randWptr]) \n\t"
+ "lh %[tmp3s], 4(%[randWptr]) \n\t"
+ "lh %[tmp4s], 6(%[randWptr]) \n\t"
+ "mtc1 %[tmp1s], %[randTemp] \n\t"
+ "mtc1 %[tmp2s], %[randTemp2] \n\t"
+ "mtc1 %[tmp3s], %[randTemp3] \n\t"
+ "mtc1 %[tmp4s], %[randTemp4] \n\t"
+ "cvt.s.w %[randTemp], %[randTemp] \n\t"
+ "cvt.s.w %[randTemp2], %[randTemp2] \n\t"
+ "cvt.s.w %[randTemp3], %[randTemp3] \n\t"
+ "cvt.s.w %[randTemp4], %[randTemp4] \n\t"
+ "addiu %[randWptr], %[randWptr], 8 \n\t"
+ "mul.s %[randTemp], %[randTemp], %[pi2t] \n\t"
+ "mul.s %[randTemp2], %[randTemp2], %[pi2t] \n\t"
+ "mul.s %[randTemp3], %[randTemp3], %[pi2t] \n\t"
+ "mul.s %[randTemp4], %[randTemp4], %[pi2t] \n\t"
+ ".set pop \n\t"
+ : [randWptr] "+r" (randWptr), [randTemp] "=&f" (randTemp),
+ [randTemp2] "=&f" (randTemp2), [randTemp3] "=&f" (randTemp3),
+ [randTemp4] "=&f" (randTemp4), [tmp1s] "=&r" (tmp1s),
+ [tmp2s] "=&r" (tmp2s), [tmp3s] "=&r" (tmp3s),
+ [tmp4s] "=&r" (tmp4s)
+ : [pi2t] "f" (pi2t)
+ : "memory"
+ );
+
+ u[i+1][0] = (float)cos(randTemp);
+ u[i+1][1] = (float)sin(randTemp);
+ u[i+2][0] = (float)cos(randTemp2);
+ u[i+2][1] = (float)sin(randTemp2);
+ u[i+3][0] = (float)cos(randTemp3);
+ u[i+3][1] = (float)sin(randTemp3);
+ u[i+4][0] = (float)cos(randTemp4);
+ u[i+4][1] = (float)sin(randTemp4);
+ }
+
+ // Reject LF noise
+ float *u_ptr = &u[1][0];
+ float noise2, noise3, noise4;
+ float tmp1f, tmp2f, tmp3f, tmp4f, tmp5f, tmp6f, tmp7f, tmp8f;
+
+ u[0][0] = 0;
+ u[0][1] = 0;
+ for (i = 1; i < PART_LEN1; i+=4) {
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "lwc1 %[noise], 4(%[noisePow]) \n\t"
+ "lwc1 %[noise2], 8(%[noisePow]) \n\t"
+ "lwc1 %[noise3], 12(%[noisePow]) \n\t"
+ "lwc1 %[noise4], 16(%[noisePow]) \n\t"
+ "sqrt.s %[noise], %[noise] \n\t"
+ "sqrt.s %[noise2], %[noise2] \n\t"
+ "sqrt.s %[noise3], %[noise3] \n\t"
+ "sqrt.s %[noise4], %[noise4] \n\t"
+ "lwc1 %[tmp1f], 0(%[u_ptr]) \n\t"
+ "lwc1 %[tmp2f], 4(%[u_ptr]) \n\t"
+ "lwc1 %[tmp3f], 8(%[u_ptr]) \n\t"
+ "lwc1 %[tmp4f], 12(%[u_ptr]) \n\t"
+ "lwc1 %[tmp5f], 16(%[u_ptr]) \n\t"
+ "lwc1 %[tmp6f], 20(%[u_ptr]) \n\t"
+ "lwc1 %[tmp7f], 24(%[u_ptr]) \n\t"
+ "lwc1 %[tmp8f], 28(%[u_ptr]) \n\t"
+ "addiu %[noisePow], %[noisePow], 16 \n\t"
+ "mul.s %[tmp1f], %[tmp1f], %[noise] \n\t"
+ "mul.s %[tmp2f], %[tmp2f], %[noise] \n\t"
+ "mul.s %[tmp3f], %[tmp3f], %[noise2] \n\t"
+ "mul.s %[tmp4f], %[tmp4f], %[noise2] \n\t"
+ "mul.s %[tmp5f], %[tmp5f], %[noise3] \n\t"
+ "mul.s %[tmp6f], %[tmp6f], %[noise3] \n\t"
+ "swc1 %[tmp1f], 0(%[u_ptr]) \n\t"
+ "swc1 %[tmp3f], 8(%[u_ptr]) \n\t"
+ "mul.s %[tmp8f], %[tmp8f], %[noise4] \n\t"
+ "mul.s %[tmp7f], %[tmp7f], %[noise4] \n\t"
+ "neg.s %[tmp2f] \n\t"
+ "neg.s %[tmp4f] \n\t"
+ "neg.s %[tmp6f] \n\t"
+ "neg.s %[tmp8f] \n\t"
+ "swc1 %[tmp5f], 16(%[u_ptr]) \n\t"
+ "swc1 %[tmp7f], 24(%[u_ptr]) \n\t"
+ "swc1 %[tmp2f], 4(%[u_ptr]) \n\t"
+ "swc1 %[tmp4f], 12(%[u_ptr]) \n\t"
+ "swc1 %[tmp6f], 20(%[u_ptr]) \n\t"
+ "swc1 %[tmp8f], 28(%[u_ptr]) \n\t"
+ "addiu %[u_ptr], %[u_ptr], 32 \n\t"
+ ".set pop \n\t"
+ : [u_ptr] "+r" (u_ptr), [noisePow] "+r" (noisePow),
+ [noise] "=&f" (noise), [noise2] "=&f" (noise2),
+ [noise3] "=&f" (noise3), [noise4] "=&f" (noise4),
+ [tmp1f] "=&f" (tmp1f), [tmp2f] "=&f" (tmp2f),
+ [tmp3f] "=&f" (tmp3f), [tmp4f] "=&f" (tmp4f),
+ [tmp5f] "=&f" (tmp5f), [tmp6f] "=&f" (tmp6f),
+ [tmp7f] "=&f" (tmp7f), [tmp8f] "=&f" (tmp8f)
+ :
+ : "memory"
+ );
+ }
+ u[PART_LEN][1] = 0;
+ noisePow -= PART_LEN;
+
+ u_ptr = &u[0][0];
+ float *u_ptr_end = &u[PART_LEN][0];
+ float *efw_ptr_0 = &efw[0][0];
+ float *efw_ptr_1 = &efw[1][0];
+ float tmp9f, tmp10f;
+ const float tmp1c = 1.0;
+ const float tmp2c = 0.0;
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "1: \n\t"
+ "lwc1 %[tmp1f], 0(%[lambda]) \n\t"
+ "lwc1 %[tmp6f], 4(%[lambda]) \n\t"
+ "addiu %[lambda], %[lambda], 8 \n\t"
+ "c.lt.s %[tmp1f], %[tmp1c] \n\t"
+ "bc1f 4f \n\t"
+ " nop \n\t"
+ "c.lt.s %[tmp6f], %[tmp1c] \n\t"
+ "bc1f 3f \n\t"
+ " nop \n\t"
+ "2: \n\t"
+ "mul.s %[tmp1f], %[tmp1f], %[tmp1f] \n\t"
+ "mul.s %[tmp6f], %[tmp6f], %[tmp6f] \n\t"
+ "sub.s %[tmp1f], %[tmp1c], %[tmp1f] \n\t"
+ "sub.s %[tmp6f], %[tmp1c], %[tmp6f] \n\t"
+ "sqrt.s %[tmp1f], %[tmp1f] \n\t"
+ "sqrt.s %[tmp6f], %[tmp6f] \n\t"
+ "lwc1 %[tmp2f], 0(%[efw_ptr_0]) \n\t"
+ "lwc1 %[tmp3f], 0(%[u_ptr]) \n\t"
+ "lwc1 %[tmp7f], 4(%[efw_ptr_0]) \n\t"
+ "lwc1 %[tmp8f], 8(%[u_ptr]) \n\t"
+ "lwc1 %[tmp4f], 0(%[efw_ptr_1]) \n\t"
+ "lwc1 %[tmp5f], 4(%[u_ptr]) \n\t"
+ "lwc1 %[tmp9f], 4(%[efw_ptr_1]) \n\t"
+ "lwc1 %[tmp10f], 12(%[u_ptr]) \n\t"
+#if !defined(MIPS32_R2_LE)
+ "mul.s %[tmp3f], %[tmp1f], %[tmp3f] \n\t"
+ "add.s %[tmp2f], %[tmp2f], %[tmp3f] \n\t"
+ "mul.s %[tmp3f], %[tmp1f], %[tmp5f] \n\t"
+ "add.s %[tmp4f], %[tmp4f], %[tmp3f] \n\t"
+ "mul.s %[tmp3f], %[tmp6f], %[tmp8f] \n\t"
+ "add.s %[tmp7f], %[tmp7f], %[tmp3f] \n\t"
+ "mul.s %[tmp3f], %[tmp6f], %[tmp10f] \n\t"
+ "add.s %[tmp9f], %[tmp9f], %[tmp3f] \n\t"
+#else // #if !defined(MIPS32_R2_LE)
+ "madd.s %[tmp2f], %[tmp2f], %[tmp1f], %[tmp3f] \n\t"
+ "madd.s %[tmp4f], %[tmp4f], %[tmp1f], %[tmp5f] \n\t"
+ "madd.s %[tmp7f], %[tmp7f], %[tmp6f], %[tmp8f] \n\t"
+ "madd.s %[tmp9f], %[tmp9f], %[tmp6f], %[tmp10f] \n\t"
+#endif // #if !defined(MIPS32_R2_LE)
+ "swc1 %[tmp2f], 0(%[efw_ptr_0]) \n\t"
+ "swc1 %[tmp4f], 0(%[efw_ptr_1]) \n\t"
+ "swc1 %[tmp7f], 4(%[efw_ptr_0]) \n\t"
+ "b 5f \n\t"
+ " swc1 %[tmp9f], 4(%[efw_ptr_1]) \n\t"
+ "3: \n\t"
+ "mul.s %[tmp1f], %[tmp1f], %[tmp1f] \n\t"
+ "sub.s %[tmp1f], %[tmp1c], %[tmp1f] \n\t"
+ "sqrt.s %[tmp1f], %[tmp1f] \n\t"
+ "lwc1 %[tmp2f], 0(%[efw_ptr_0]) \n\t"
+ "lwc1 %[tmp3f], 0(%[u_ptr]) \n\t"
+ "lwc1 %[tmp4f], 0(%[efw_ptr_1]) \n\t"
+ "lwc1 %[tmp5f], 4(%[u_ptr]) \n\t"
+#if !defined(MIPS32_R2_LE)
+ "mul.s %[tmp3f], %[tmp1f], %[tmp3f] \n\t"
+ "add.s %[tmp2f], %[tmp2f], %[tmp3f] \n\t"
+ "mul.s %[tmp3f], %[tmp1f], %[tmp5f] \n\t"
+ "add.s %[tmp4f], %[tmp4f], %[tmp3f] \n\t"
+#else // #if !defined(MIPS32_R2_LE)
+ "madd.s %[tmp2f], %[tmp2f], %[tmp1f], %[tmp3f] \n\t"
+ "madd.s %[tmp4f], %[tmp4f], %[tmp1f], %[tmp5f] \n\t"
+#endif // #if !defined(MIPS32_R2_LE)
+ "swc1 %[tmp2f], 0(%[efw_ptr_0]) \n\t"
+ "b 5f \n\t"
+ " swc1 %[tmp4f], 0(%[efw_ptr_1]) \n\t"
+ "4: \n\t"
+ "c.lt.s %[tmp6f], %[tmp1c] \n\t"
+ "bc1f 5f \n\t"
+ " nop \n\t"
+ "mul.s %[tmp6f], %[tmp6f], %[tmp6f] \n\t"
+ "sub.s %[tmp6f], %[tmp1c], %[tmp6f] \n\t"
+ "sqrt.s %[tmp6f], %[tmp6f] \n\t"
+ "lwc1 %[tmp7f], 4(%[efw_ptr_0]) \n\t"
+ "lwc1 %[tmp8f], 8(%[u_ptr]) \n\t"
+ "lwc1 %[tmp9f], 4(%[efw_ptr_1]) \n\t"
+ "lwc1 %[tmp10f], 12(%[u_ptr]) \n\t"
+#if !defined(MIPS32_R2_LE)
+ "mul.s %[tmp3f], %[tmp6f], %[tmp8f] \n\t"
+ "add.s %[tmp7f], %[tmp7f], %[tmp3f] \n\t"
+ "mul.s %[tmp3f], %[tmp6f], %[tmp10f] \n\t"
+ "add.s %[tmp9f], %[tmp9f], %[tmp3f] \n\t"
+#else // #if !defined(MIPS32_R2_LE)
+ "madd.s %[tmp7f], %[tmp7f], %[tmp6f], %[tmp8f] \n\t"
+ "madd.s %[tmp9f], %[tmp9f], %[tmp6f], %[tmp10f] \n\t"
+#endif // #if !defined(MIPS32_R2_LE)
+ "swc1 %[tmp7f], 4(%[efw_ptr_0]) \n\t"
+ "swc1 %[tmp9f], 4(%[efw_ptr_1]) \n\t"
+ "5: \n\t"
+ "addiu %[u_ptr], %[u_ptr], 16 \n\t"
+ "addiu %[efw_ptr_0], %[efw_ptr_0], 8 \n\t"
+ "bne %[u_ptr], %[u_ptr_end], 1b \n\t"
+ " addiu %[efw_ptr_1], %[efw_ptr_1], 8 \n\t"
+ ".set pop \n\t"
+ : [lambda] "+r" (lambda), [u_ptr] "+r" (u_ptr),
+ [efw_ptr_0] "+r" (efw_ptr_0), [efw_ptr_1] "+r" (efw_ptr_1),
+ [tmp1f] "=&f" (tmp1f), [tmp2f] "=&f" (tmp2f), [tmp3f] "=&f" (tmp3f),
+ [tmp4f] "=&f" (tmp4f), [tmp5f] "=&f" (tmp5f),
+ [tmp6f] "=&f" (tmp6f), [tmp7f] "=&f" (tmp7f), [tmp8f] "=&f" (tmp8f),
+ [tmp9f] "=&f" (tmp9f), [tmp10f] "=&f" (tmp10f)
+ : [tmp1c] "f" (tmp1c), [tmp2c] "f" (tmp2c), [u_ptr_end] "r" (u_ptr_end)
+ : "memory"
+ );
+
+ lambda -= PART_LEN;
+ tmp = sqrtf(WEBRTC_SPL_MAX(1 - lambda[PART_LEN] * lambda[PART_LEN], 0));
+ //tmp = 1 - lambda[i];
+ efw[0][PART_LEN] += tmp * u[PART_LEN][0];
+ efw[1][PART_LEN] += tmp * u[PART_LEN][1];
+
+ // For H band comfort noise
+ // TODO: don't compute noise and "tmp" twice. Use the previous results.
+ noiseAvg = 0.0;
+ tmpAvg = 0.0;
+ num = 0;
+ if (aec->sampFreq == 32000 && flagHbandCn == 1) {
+ for (i = 0; i < PART_LEN; i++) {
+ rand[i] = ((float)randW16[i]) / 32768;
+ }
+
+ // average noise scale
+ // average over second half of freq spectrum (i.e., 4->8khz)
+ // TODO: we shouldn't need num. We know how many elements we're summing.
+ for (i = PART_LEN1 >> 1; i < PART_LEN1; i++) {
+ num++;
+ noiseAvg += sqrtf(noisePow[i]);
+ }
+ noiseAvg /= (float)num;
+
+ // average nlp scale
+ // average over second half of freq spectrum (i.e., 4->8khz)
+ // TODO: we shouldn't need num. We know how many elements we're summing.
+ num = 0;
+ for (i = PART_LEN1 >> 1; i < PART_LEN1; i++) {
+ num++;
+ tmpAvg += sqrtf(WEBRTC_SPL_MAX(1 - lambda[i] * lambda[i], 0));
+ }
+ tmpAvg /= (float)num;
+
+ // Use average noise for H band
+ // TODO: we should probably have a new random vector here.
+ // Reject LF noise
+ u[0][0] = 0;
+ u[0][1] = 0;
+ for (i = 1; i < PART_LEN1; i++) {
+ tmp = pi2 * rand[i - 1];
+
+ // Use average noise for H band
+ u[i][0] = noiseAvg * (float)cos(tmp);
+ u[i][1] = -noiseAvg * (float)sin(tmp);
+ }
+ u[PART_LEN][1] = 0;
+
+ for (i = 0; i < PART_LEN1; i++) {
+ // Use average NLP weight for H band
+ comfortNoiseHband[i][0] = tmpAvg * u[i][0];
+ comfortNoiseHband[i][1] = tmpAvg * u[i][1];
+ }
+ }
+}
+
+void WebRtcAec_FilterFar_mips(AecCore *aec, float yf[2][PART_LEN1]) {
+ int i;
+ for (i = 0; i < aec->num_partitions; i++) {
+ int xPos = (i + aec->xfBufBlockPos) * PART_LEN1;
+ int pos = i * PART_LEN1;
+ // Check for wrap
+ if (i + aec->xfBufBlockPos >= aec->num_partitions) {
+ xPos -= aec->num_partitions * (PART_LEN1);
+ }
+ float *yf0 = yf[0];
+ float *yf1 = yf[1];
+ float *aRe = aec->xfBuf[0] + xPos;
+ float *aIm = aec->xfBuf[1] + xPos;
+ float *bRe = aec->wfBuf[0] + pos;
+ float *bIm = aec->wfBuf[1] + pos;
+ float f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13;
+ int len = PART_LEN1 >> 1;
+ int len1 = PART_LEN1 & 1;
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "1: \n\t"
+ "lwc1 %[f0], 0(%[aRe]) \n\t"
+ "lwc1 %[f1], 0(%[bRe]) \n\t"
+ "lwc1 %[f2], 0(%[bIm]) \n\t"
+ "lwc1 %[f3], 0(%[aIm]) \n\t"
+ "lwc1 %[f4], 4(%[aRe]) \n\t"
+ "lwc1 %[f5], 4(%[bRe]) \n\t"
+ "lwc1 %[f6], 4(%[bIm]) \n\t"
+ "mul.s %[f8], %[f0], %[f1] \n\t"
+ "mul.s %[f0], %[f0], %[f2] \n\t"
+ "mul.s %[f9], %[f4], %[f5] \n\t"
+ "mul.s %[f4], %[f4], %[f6] \n\t"
+ "lwc1 %[f7], 4(%[aIm]) \n\t"
+#if !defined(MIPS32_R2_LE)
+ "mul.s %[f12], %[f2], %[f3] \n\t"
+ "mul.s %[f1], %[f3], %[f1] \n\t"
+ "mul.s %[f11], %[f6], %[f7] \n\t"
+ "addiu %[aRe], %[aRe], 8 \n\t"
+ "addiu %[aIm], %[aIm], 8 \n\t"
+ "addiu %[len], %[len], -1 \n\t"
+ "sub.s %[f8], %[f8], %[f12] \n\t"
+ "mul.s %[f12], %[f7], %[f5] \n\t"
+ "lwc1 %[f2], 0(%[yf0]) \n\t"
+ "add.s %[f1], %[f0], %[f1] \n\t"
+ "lwc1 %[f3], 0(%[yf1]) \n\t"
+ "sub.s %[f9], %[f9], %[f11] \n\t"
+ "lwc1 %[f6], 4(%[yf0]) \n\t"
+ "add.s %[f4], %[f4], %[f12] \n\t"
+#else // #if !defined(MIPS32_R2_LE)
+ "addiu %[aRe], %[aRe], 8 \n\t"
+ "addiu %[aIm], %[aIm], 8 \n\t"
+ "addiu %[len], %[len], -1 \n\t"
+ "nmsub.s %[f8], %[f8], %[f2], %[f3] \n\t"
+ "lwc1 %[f2], 0(%[yf0]) \n\t"
+ "madd.s %[f1], %[f0], %[f3], %[f1] \n\t"
+ "lwc1 %[f3], 0(%[yf1]) \n\t"
+ "nmsub.s %[f9], %[f9], %[f6], %[f7] \n\t"
+ "lwc1 %[f6], 4(%[yf0]) \n\t"
+ "madd.s %[f4], %[f4], %[f7], %[f5] \n\t"
+#endif // #if !defined(MIPS32_R2_LE)
+ "lwc1 %[f5], 4(%[yf1]) \n\t"
+ "add.s %[f2], %[f2], %[f8] \n\t"
+ "addiu %[bRe], %[bRe], 8 \n\t"
+ "addiu %[bIm], %[bIm], 8 \n\t"
+ "add.s %[f3], %[f3], %[f1] \n\t"
+ "add.s %[f6], %[f6], %[f9] \n\t"
+ "add.s %[f5], %[f5], %[f4] \n\t"
+ "swc1 %[f2], 0(%[yf0]) \n\t"
+ "swc1 %[f3], 0(%[yf1]) \n\t"
+ "swc1 %[f6], 4(%[yf0]) \n\t"
+ "swc1 %[f5], 4(%[yf1]) \n\t"
+ "addiu %[yf0], %[yf0], 8 \n\t"
+ "bgtz %[len], 1b \n\t"
+ " addiu %[yf1], %[yf1], 8 \n\t"
+ "lwc1 %[f0], 0(%[aRe]) \n\t"
+ "lwc1 %[f1], 0(%[bRe]) \n\t"
+ "lwc1 %[f2], 0(%[bIm]) \n\t"
+ "lwc1 %[f3], 0(%[aIm]) \n\t"
+ "mul.s %[f8], %[f0], %[f1] \n\t"
+ "mul.s %[f0], %[f0], %[f2] \n\t"
+#if !defined(MIPS32_R2_LE)
+ "mul.s %[f12], %[f2], %[f3] \n\t"
+ "mul.s %[f1], %[f3], %[f1] \n\t"
+ "sub.s %[f8], %[f8], %[f12] \n\t"
+ "lwc1 %[f2], 0(%[yf0]) \n\t"
+ "add.s %[f1], %[f0], %[f1] \n\t"
+ "lwc1 %[f3], 0(%[yf1]) \n\t"
+#else // #if !defined(MIPS32_R2_LE)
+ "nmsub.s %[f8], %[f8], %[f2], %[f3] \n\t"
+ "lwc1 %[f2], 0(%[yf0]) \n\t"
+ "madd.s %[f1], %[f0], %[f3], %[f1] \n\t"
+ "lwc1 %[f3], 0(%[yf1]) \n\t"
+#endif // #if !defined(MIPS32_R2_LE)
+ "add.s %[f2], %[f2], %[f8] \n\t"
+ "add.s %[f3], %[f3], %[f1] \n\t"
+ "swc1 %[f2], 0(%[yf0]) \n\t"
+ "swc1 %[f3], 0(%[yf1]) \n\t"
+ ".set pop \n\t"
+ : [f0] "=&f" (f0), [f1] "=&f" (f1), [f2] "=&f" (f2),
+ [f3] "=&f" (f3), [f4] "=&f" (f4), [f5] "=&f" (f5),
+ [f6] "=&f" (f6), [f7] "=&f" (f7), [f8] "=&f" (f8),
+ [f9] "=&f" (f9), [f10] "=&f" (f10), [f11] "=&f" (f11),
+ [f12] "=&f" (f12), [f13] "=&f" (f13), [aRe] "+r" (aRe),
+ [aIm] "+r" (aIm), [bRe] "+r" (bRe), [bIm] "+r" (bIm),
+ [yf0] "+r" (yf0), [yf1] "+r" (yf1), [len] "+r" (len)
+ : [len1] "r" (len1)
+ : "memory"
+ );
+ }
+}
+
+void WebRtcAec_FilterAdaptation_mips(AecCore *aec,
+ float *fft,
+ float ef[2][PART_LEN1]) {
+ int i;
+ for (i = 0; i < aec->num_partitions; i++) {
+ int xPos = (i + aec->xfBufBlockPos)*(PART_LEN1);
+ int pos;
+ // Check for wrap
+ if (i + aec->xfBufBlockPos >= aec->num_partitions) {
+ xPos -= aec->num_partitions * PART_LEN1;
+ }
+
+ pos = i * PART_LEN1;
+ float *aRe = aec->xfBuf[0] + xPos;
+ float *aIm = aec->xfBuf[1] + xPos;
+ float *bRe = ef[0];
+ float *bIm = ef[1];
+ float *fft_tmp = fft;
+
+ float f0, f1, f2, f3, f4, f5, f6 ,f7, f8, f9, f10, f11, f12;
+ int len = PART_LEN >> 1;
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "1: \n\t"
+ "lwc1 %[f0], 0(%[aRe]) \n\t"
+ "lwc1 %[f1], 0(%[bRe]) \n\t"
+ "lwc1 %[f2], 0(%[bIm]) \n\t"
+ "lwc1 %[f4], 4(%[aRe]) \n\t"
+ "lwc1 %[f5], 4(%[bRe]) \n\t"
+ "lwc1 %[f6], 4(%[bIm]) \n\t"
+ "addiu %[aRe], %[aRe], 8 \n\t"
+ "addiu %[bRe], %[bRe], 8 \n\t"
+ "mul.s %[f8], %[f0], %[f1] \n\t"
+ "mul.s %[f0], %[f0], %[f2] \n\t"
+ "lwc1 %[f3], 0(%[aIm]) \n\t"
+ "mul.s %[f9], %[f4], %[f5] \n\t"
+ "lwc1 %[f7], 4(%[aIm]) \n\t"
+ "mul.s %[f4], %[f4], %[f6] \n\t"
+#if !defined(MIPS32_R2_LE)
+ "mul.s %[f10], %[f3], %[f2] \n\t"
+ "mul.s %[f1], %[f3], %[f1] \n\t"
+ "mul.s %[f11], %[f7], %[f6] \n\t"
+ "mul.s %[f5], %[f7], %[f5] \n\t"
+ "addiu %[aIm], %[aIm], 8 \n\t"
+ "addiu %[bIm], %[bIm], 8 \n\t"
+ "addiu %[len], %[len], -1 \n\t"
+ "add.s %[f8], %[f8], %[f10] \n\t"
+ "sub.s %[f1], %[f0], %[f1] \n\t"
+ "add.s %[f9], %[f9], %[f11] \n\t"
+ "sub.s %[f5], %[f4], %[f5] \n\t"
+#else // #if !defined(MIPS32_R2_LE)
+ "addiu %[aIm], %[aIm], 8 \n\t"
+ "addiu %[bIm], %[bIm], 8 \n\t"
+ "addiu %[len], %[len], -1 \n\t"
+ "madd.s %[f8], %[f8], %[f3], %[f2] \n\t"
+ "nmsub.s %[f1], %[f0], %[f3], %[f1] \n\t"
+ "madd.s %[f9], %[f9], %[f7], %[f6] \n\t"
+ "nmsub.s %[f5], %[f4], %[f7], %[f5] \n\t"
+#endif // #if !defined(MIPS32_R2_LE)
+ "swc1 %[f8], 0(%[fft_tmp]) \n\t"
+ "swc1 %[f1], 4(%[fft_tmp]) \n\t"
+ "swc1 %[f9], 8(%[fft_tmp]) \n\t"
+ "swc1 %[f5], 12(%[fft_tmp]) \n\t"
+ "bgtz %[len], 1b \n\t"
+ " addiu %[fft_tmp], %[fft_tmp], 16 \n\t"
+ "lwc1 %[f0], 0(%[aRe]) \n\t"
+ "lwc1 %[f1], 0(%[bRe]) \n\t"
+ "lwc1 %[f2], 0(%[bIm]) \n\t"
+ "lwc1 %[f3], 0(%[aIm]) \n\t"
+ "mul.s %[f8], %[f0], %[f1] \n\t"
+#if !defined(MIPS32_R2_LE)
+ "mul.s %[f10], %[f3], %[f2] \n\t"
+ "add.s %[f8], %[f8], %[f10] \n\t"
+#else // #if !defined(MIPS32_R2_LE)
+ "madd.s %[f8], %[f8], %[f3], %[f2] \n\t"
+#endif // #if !defined(MIPS32_R2_LE)
+ "swc1 %[f8], 4(%[fft]) \n\t"
+ ".set pop \n\t"
+ : [f0] "=&f" (f0), [f1] "=&f" (f1), [f2] "=&f" (f2),
+ [f3] "=&f" (f3), [f4] "=&f" (f4), [f5] "=&f" (f5),
+ [f6] "=&f" (f6), [f7] "=&f" (f7), [f8] "=&f" (f8),
+ [f9] "=&f" (f9), [f10] "=&f" (f10), [f11] "=&f" (f11),
+ [f12] "=&f" (f12), [aRe] "+r" (aRe), [aIm] "+r" (aIm),
+ [bRe] "+r" (bRe), [bIm] "+r" (bIm), [fft_tmp] "+r" (fft_tmp),
+ [len] "+r" (len), [fft] "=&r" (fft)
+ :
+ : "memory"
+ );
+
+ aec_rdft_inverse_128(fft);
+ memset(fft + PART_LEN, 0, sizeof(float) * PART_LEN);
+
+ // fft scaling
+ {
+ float scale = 2.0f / PART_LEN2;
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[fft_tmp], %[fft], 0 \n\t"
+ "addiu %[len], $zero, 8 \n\t"
+ "1: \n\t"
+ "addiu %[len], %[len], -1 \n\t"
+ "lwc1 %[f0], 0(%[fft_tmp]) \n\t"
+ "lwc1 %[f1], 4(%[fft_tmp]) \n\t"
+ "lwc1 %[f2], 8(%[fft_tmp]) \n\t"
+ "lwc1 %[f3], 12(%[fft_tmp]) \n\t"
+ "mul.s %[f0], %[f0], %[scale] \n\t"
+ "mul.s %[f1], %[f1], %[scale] \n\t"
+ "mul.s %[f2], %[f2], %[scale] \n\t"
+ "mul.s %[f3], %[f3], %[scale] \n\t"
+ "lwc1 %[f4], 16(%[fft_tmp]) \n\t"
+ "lwc1 %[f5], 20(%[fft_tmp]) \n\t"
+ "lwc1 %[f6], 24(%[fft_tmp]) \n\t"
+ "lwc1 %[f7], 28(%[fft_tmp]) \n\t"
+ "mul.s %[f4], %[f4], %[scale] \n\t"
+ "mul.s %[f5], %[f5], %[scale] \n\t"
+ "mul.s %[f6], %[f6], %[scale] \n\t"
+ "mul.s %[f7], %[f7], %[scale] \n\t"
+ "swc1 %[f0], 0(%[fft_tmp]) \n\t"
+ "swc1 %[f1], 4(%[fft_tmp]) \n\t"
+ "swc1 %[f2], 8(%[fft_tmp]) \n\t"
+ "swc1 %[f3], 12(%[fft_tmp]) \n\t"
+ "swc1 %[f4], 16(%[fft_tmp]) \n\t"
+ "swc1 %[f5], 20(%[fft_tmp]) \n\t"
+ "swc1 %[f6], 24(%[fft_tmp]) \n\t"
+ "swc1 %[f7], 28(%[fft_tmp]) \n\t"
+ "bgtz %[len], 1b \n\t"
+ " addiu %[fft_tmp], %[fft_tmp], 32 \n\t"
+ ".set pop \n\t"
+ : [f0] "=&f" (f0), [f1] "=&f" (f1), [f2] "=&f" (f2),
+ [f3] "=&f" (f3), [f4] "=&f" (f4), [f5] "=&f" (f5),
+ [f6] "=&f" (f6), [f7] "=&f" (f7), [len] "=&r" (len),
+ [fft_tmp] "=&r" (fft_tmp)
+ : [scale] "f" (scale), [fft] "r" (fft)
+ : "memory"
+ );
+ }
+ aec_rdft_forward_128(fft);
+ aRe = aec->wfBuf[0] + pos;
+ aIm = aec->wfBuf[1] + pos;
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[fft_tmp], %[fft], 0 \n\t"
+ "addiu %[len], $zero, 31 \n\t"
+ "lwc1 %[f0], 0(%[aRe]) \n\t"
+ "lwc1 %[f1], 0(%[fft_tmp]) \n\t"
+ "lwc1 %[f2], 256(%[aRe]) \n\t"
+ "lwc1 %[f3], 4(%[fft_tmp]) \n\t"
+ "lwc1 %[f4], 4(%[aRe]) \n\t"
+ "lwc1 %[f5], 8(%[fft_tmp]) \n\t"
+ "lwc1 %[f6], 4(%[aIm]) \n\t"
+ "lwc1 %[f7], 12(%[fft_tmp]) \n\t"
+ "add.s %[f0], %[f0], %[f1] \n\t"
+ "add.s %[f2], %[f2], %[f3] \n\t"
+ "add.s %[f4], %[f4], %[f5] \n\t"
+ "add.s %[f6], %[f6], %[f7] \n\t"
+ "addiu %[fft_tmp], %[fft_tmp], 16 \n\t"
+ "swc1 %[f0], 0(%[aRe]) \n\t"
+ "swc1 %[f2], 256(%[aRe]) \n\t"
+ "swc1 %[f4], 4(%[aRe]) \n\t"
+ "addiu %[aRe], %[aRe], 8 \n\t"
+ "swc1 %[f6], 4(%[aIm]) \n\t"
+ "addiu %[aIm], %[aIm], 8 \n\t"
+ "1: \n\t"
+ "lwc1 %[f0], 0(%[aRe]) \n\t"
+ "lwc1 %[f1], 0(%[fft_tmp]) \n\t"
+ "lwc1 %[f2], 0(%[aIm]) \n\t"
+ "lwc1 %[f3], 4(%[fft_tmp]) \n\t"
+ "lwc1 %[f4], 4(%[aRe]) \n\t"
+ "lwc1 %[f5], 8(%[fft_tmp]) \n\t"
+ "lwc1 %[f6], 4(%[aIm]) \n\t"
+ "lwc1 %[f7], 12(%[fft_tmp]) \n\t"
+ "add.s %[f0], %[f0], %[f1] \n\t"
+ "add.s %[f2], %[f2], %[f3] \n\t"
+ "add.s %[f4], %[f4], %[f5] \n\t"
+ "add.s %[f6], %[f6], %[f7] \n\t"
+ "addiu %[len], %[len], -1 \n\t"
+ "addiu %[fft_tmp], %[fft_tmp], 16 \n\t"
+ "swc1 %[f0], 0(%[aRe]) \n\t"
+ "swc1 %[f2], 0(%[aIm]) \n\t"
+ "swc1 %[f4], 4(%[aRe]) \n\t"
+ "addiu %[aRe], %[aRe], 8 \n\t"
+ "swc1 %[f6], 4(%[aIm]) \n\t"
+ "bgtz %[len], 1b \n\t"
+ " addiu %[aIm], %[aIm], 8 \n\t"
+ ".set pop \n\t"
+ : [f0] "=&f" (f0), [f1] "=&f" (f1), [f2] "=&f" (f2),
+ [f3] "=&f" (f3), [f4] "=&f" (f4), [f5] "=&f" (f5),
+ [f6] "=&f" (f6), [f7] "=&f" (f7), [len] "=&r" (len),
+ [fft_tmp] "=&r" (fft_tmp)
+ : [aRe] "r" (aRe), [aIm] "r" (aIm), [fft] "r" (fft)
+ : "memory"
+ );
+ }
+}
+
+void WebRtcAec_OverdriveAndSuppress_mips(AecCore *aec,
+ float hNl[PART_LEN1],
+ const float hNlFb,
+ float efw[2][PART_LEN1]) {
+ int i;
+ const float one = 1.0;
+ float *p_hNl, *p_efw0, *p_efw1;
+ float *p_WebRtcAec_wC;
+ float temp1, temp2, temp3, temp4;
+
+ p_hNl = &hNl[0];
+ p_efw0 = &efw[0][0];
+ p_efw1 = &efw[1][0];
+ p_WebRtcAec_wC = (float*)&WebRtcAec_weightCurve[0];
+
+ for (i = 0; i < PART_LEN1; i++) {
+ // Weight subbands
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "lwc1 %[temp1], 0(%[p_hNl]) \n\t"
+ "lwc1 %[temp2], 0(%[p_wC]) \n\t"
+ "c.lt.s %[hNlFb], %[temp1] \n\t"
+ "bc1f 1f \n\t"
+ " mul.s %[temp3], %[temp2], %[hNlFb] \n\t"
+ "sub.s %[temp4], %[one], %[temp2] \n\t"
+#if !defined(MIPS32_R2_LE)
+ "mul.s %[temp1], %[temp1], %[temp4] \n\t"
+ "add.s %[temp1], %[temp3], %[temp1] \n\t"
+#else // #if !defined(MIPS32_R2_LE)
+ "madd.s %[temp1], %[temp3], %[temp1], %[temp4] \n\t"
+#endif // #if !defined(MIPS32_R2_LE)
+ "swc1 %[temp1], 0(%[p_hNl]) \n\t"
+ "1: \n\t"
+ "addiu %[p_wC], %[p_wC], 4 \n\t"
+ ".set pop \n\t"
+ : [temp1] "=&f" (temp1), [temp2] "=&f" (temp2), [temp3] "=&f" (temp3),
+ [temp4] "=&f" (temp4), [p_wC] "+r" (p_WebRtcAec_wC)
+ : [hNlFb] "f" (hNlFb), [one] "f" (one), [p_hNl] "r" (p_hNl)
+ : "memory"
+ );
+
+ hNl[i] = powf(hNl[i], aec->overDriveSm * WebRtcAec_overDriveCurve[i]);
+
+ __asm __volatile (
+ "lwc1 %[temp1], 0(%[p_hNl]) \n\t"
+ "lwc1 %[temp3], 0(%[p_efw1]) \n\t"
+ "lwc1 %[temp2], 0(%[p_efw0]) \n\t"
+ "addiu %[p_hNl], %[p_hNl], 4 \n\t"
+ "mul.s %[temp3], %[temp3], %[temp1] \n\t"
+ "mul.s %[temp2], %[temp2], %[temp1] \n\t"
+ "addiu %[p_efw0], %[p_efw0], 4 \n\t"
+ "addiu %[p_efw1], %[p_efw1], 4 \n\t"
+ "neg.s %[temp4], %[temp3] \n\t"
+ "swc1 %[temp2], -4(%[p_efw0]) \n\t"
+ "swc1 %[temp4], -4(%[p_efw1]) \n\t"
+ : [temp1] "=&f" (temp1), [temp2] "=&f" (temp2), [temp3] "=&f" (temp3),
+ [temp4] "=&f" (temp4), [p_efw0] "+r" (p_efw0), [p_efw1] "+r" (p_efw1),
+ [p_hNl] "+r" (p_hNl)
+ :
+ : "memory"
+ );
+ }
+}
+
+void WebRtcAec_ScaleErrorSignal_mips(AecCore *aec, float ef[2][PART_LEN1]) {
+ const float mu = aec->extended_filter_enabled ? kExtendedMu : aec->normal_mu;
+ const float error_threshold = aec->extended_filter_enabled
+ ? kExtendedErrorThreshold
+ : aec->normal_error_threshold;
+ int len = (PART_LEN1);
+ float *ef0 = ef[0];
+ float *ef1 = ef[1];
+ float *xPow = aec->xPow;
+ float fac1 = 1e-10f;
+ float err_th2 = error_threshold * error_threshold;
+ float f0, f1, f2;
+#if !defined(MIPS32_R2_LE)
+ float f3;
+#endif
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "1: \n\t"
+ "lwc1 %[f0], 0(%[xPow]) \n\t"
+ "lwc1 %[f1], 0(%[ef0]) \n\t"
+ "lwc1 %[f2], 0(%[ef1]) \n\t"
+ "add.s %[f0], %[f0], %[fac1] \n\t"
+ "div.s %[f1], %[f1], %[f0] \n\t"
+ "div.s %[f2], %[f2], %[f0] \n\t"
+ "mul.s %[f0], %[f1], %[f1] \n\t"
+#if defined(MIPS32_R2_LE)
+ "madd.s %[f0], %[f0], %[f2], %[f2] \n\t"
+#else
+ "mul.s %[f3], %[f2], %[f2] \n\t"
+ "add.s %[f0], %[f0], %[f3] \n\t"
+#endif
+ "c.le.s %[f0], %[err_th2] \n\t"
+ "nop \n\t"
+ "bc1t 2f \n\t"
+ " nop \n\t"
+ "sqrt.s %[f0], %[f0] \n\t"
+ "add.s %[f0], %[f0], %[fac1] \n\t"
+ "div.s %[f0], %[err_th], %[f0] \n\t"
+ "mul.s %[f1], %[f1], %[f0] \n\t"
+ "mul.s %[f2], %[f2], %[f0] \n\t"
+ "2: \n\t"
+ "mul.s %[f1], %[f1], %[mu] \n\t"
+ "mul.s %[f2], %[f2], %[mu] \n\t"
+ "swc1 %[f1], 0(%[ef0]) \n\t"
+ "swc1 %[f2], 0(%[ef1]) \n\t"
+ "addiu %[len], %[len], -1 \n\t"
+ "addiu %[xPow], %[xPow], 4 \n\t"
+ "addiu %[ef0], %[ef0], 4 \n\t"
+ "bgtz %[len], 1b \n\t"
+ " addiu %[ef1], %[ef1], 4 \n\t"
+ ".set pop \n\t"
+ : [f0] "=&f" (f0), [f1] "=&f" (f1), [f2] "=&f" (f2),
+#if !defined(MIPS32_R2_LE)
+ [f3] "=&f" (f3),
+#endif
+ [xPow] "+r" (xPow), [ef0] "+r" (ef0), [ef1] "+r" (ef1),
+ [len] "+r" (len)
+ : [fac1] "f" (fac1), [err_th2] "f" (err_th2), [mu] "f" (mu),
+ [err_th] "f" (error_threshold)
+ : "memory"
+ );
+}
+
+void WebRtcAec_InitAec_mips(void)
+{
+ WebRtcAec_FilterFar = WebRtcAec_FilterFar_mips;
+ WebRtcAec_FilterAdaptation = WebRtcAec_FilterAdaptation_mips;
+ WebRtcAec_ScaleErrorSignal = WebRtcAec_ScaleErrorSignal_mips;
+ WebRtcAec_ComfortNoise = WebRtcAec_ComfortNoise_mips;
+ WebRtcAec_OverdriveAndSuppress = WebRtcAec_OverdriveAndSuppress_mips;
+}
+
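[Editor's note: for readers who do not speak MIPS assembly, the comfort-noise routine above shapes unit-magnitude noise with random phase by the estimated noise spectrum, then mixes it in wherever the suppressor attenuates the signal. A scalar C reference for the same per-bin math, assuming the PART_LEN constants from aec_core.h and a caller-supplied random phase array (the function and parameter names are illustrative):]

#include <math.h>

#define PART_LEN1 65  // PART_LEN + 1, as in aec_core.h.

static void ComfortNoiseScalar(float efw[2][PART_LEN1],
                               const float* noisePow,  // Noise power per bin.
                               const float* lambda,    // Suppression gains.
                               const float* phase) {   // Uniform in [0, 2*pi).
  int i;
  for (i = 1; i < PART_LEN1; i++) {  // Bin 0 is skipped to reject LF noise.
    const float noise = sqrtf(noisePow[i]);
    // Conjugate sign on the imaginary part matches the Ooura FFT convention.
    const float uRe = noise * cosf(phase[i - 1]);
    const float uIm = -noise * sinf(phase[i - 1]);
    if (lambda[i] < 1.0f) {
      // Mix in noise only where the NLP gain suppresses the signal.
      const float g = sqrtf(fmaxf(1.0f - lambda[i] * lambda[i], 0.0f));
      efw[0][i] += g * uRe;
      efw[1][i] += g * uIm;
    }
  }
}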
diff --git a/chromium/third_party/webrtc/modules/audio_processing/aec/aec_core_neon.c b/chromium/third_party/webrtc/modules/audio_processing/aec/aec_core_neon.c
new file mode 100644
index 00000000000..cec0a7e3379
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_processing/aec/aec_core_neon.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * The core AEC algorithm, neon version of speed-critical functions.
+ *
+ * Based on aec_core_sse2.c.
+ */
+
+#include "webrtc/modules/audio_processing/aec/aec_core.h"
+
+#include <arm_neon.h>
+#include <math.h>
+#include <string.h> // memset
+
+#include "webrtc/modules/audio_processing/aec/aec_core_internal.h"
+#include "webrtc/modules/audio_processing/aec/aec_rdft.h"
+
+enum { kShiftExponentIntoTopMantissa = 8 };
+enum { kFloatExponentShift = 23 };
+
+__inline static float MulRe(float aRe, float aIm, float bRe, float bIm) {
+ return aRe * bRe - aIm * bIm;
+}
+
+static void FilterAdaptationNEON(AecCore* aec,
+ float* fft,
+ float ef[2][PART_LEN1]) {
+ int i;
+ const int num_partitions = aec->num_partitions;
+ for (i = 0; i < num_partitions; i++) {
+ int xPos = (i + aec->xfBufBlockPos) * PART_LEN1;
+ int pos = i * PART_LEN1;
+ int j;
+ // Check for wrap
+ if (i + aec->xfBufBlockPos >= num_partitions) {
+ xPos -= num_partitions * PART_LEN1;
+ }
+
+ // Process the whole array...
+ for (j = 0; j < PART_LEN; j += 4) {
+ // Load xfBuf and ef.
+ const float32x4_t xfBuf_re = vld1q_f32(&aec->xfBuf[0][xPos + j]);
+ const float32x4_t xfBuf_im = vld1q_f32(&aec->xfBuf[1][xPos + j]);
+ const float32x4_t ef_re = vld1q_f32(&ef[0][j]);
+ const float32x4_t ef_im = vld1q_f32(&ef[1][j]);
+ // Calculate the product of conjugate(xfBuf) by ef.
+ // re(conjugate(a) * b) = aRe * bRe + aIm * bIm
+ // im(conjugate(a) * b)= aRe * bIm - aIm * bRe
+ const float32x4_t a = vmulq_f32(xfBuf_re, ef_re);
+ const float32x4_t e = vmlaq_f32(a, xfBuf_im, ef_im);
+ const float32x4_t c = vmulq_f32(xfBuf_re, ef_im);
+ const float32x4_t f = vmlsq_f32(c, xfBuf_im, ef_re);
+ // Interleave real and imaginary parts.
+ const float32x4x2_t g_n_h = vzipq_f32(e, f);
+ // Store
+ vst1q_f32(&fft[2 * j + 0], g_n_h.val[0]);
+ vst1q_f32(&fft[2 * j + 4], g_n_h.val[1]);
+ }
+ // ... and fixup the first imaginary entry.
+ fft[1] = MulRe(aec->xfBuf[0][xPos + PART_LEN],
+ -aec->xfBuf[1][xPos + PART_LEN],
+ ef[0][PART_LEN],
+ ef[1][PART_LEN]);
+
+ aec_rdft_inverse_128(fft);
+ memset(fft + PART_LEN, 0, sizeof(float) * PART_LEN);
+
+ // fft scaling
+ {
+ const float scale = 2.0f / PART_LEN2;
+ const float32x4_t scale_ps = vmovq_n_f32(scale);
+ for (j = 0; j < PART_LEN; j += 4) {
+ const float32x4_t fft_ps = vld1q_f32(&fft[j]);
+ const float32x4_t fft_scale = vmulq_f32(fft_ps, scale_ps);
+ vst1q_f32(&fft[j], fft_scale);
+ }
+ }
+ aec_rdft_forward_128(fft);
+
+ {
+ const float wt1 = aec->wfBuf[1][pos];
+ aec->wfBuf[0][pos + PART_LEN] += fft[1];
+ for (j = 0; j < PART_LEN; j += 4) {
+ float32x4_t wtBuf_re = vld1q_f32(&aec->wfBuf[0][pos + j]);
+ float32x4_t wtBuf_im = vld1q_f32(&aec->wfBuf[1][pos + j]);
+ const float32x4_t fft0 = vld1q_f32(&fft[2 * j + 0]);
+ const float32x4_t fft4 = vld1q_f32(&fft[2 * j + 4]);
+ const float32x4x2_t fft_re_im = vuzpq_f32(fft0, fft4);
+ wtBuf_re = vaddq_f32(wtBuf_re, fft_re_im.val[0]);
+ wtBuf_im = vaddq_f32(wtBuf_im, fft_re_im.val[1]);
+
+ vst1q_f32(&aec->wfBuf[0][pos + j], wtBuf_re);
+ vst1q_f32(&aec->wfBuf[1][pos + j], wtBuf_im);
+ }
+ aec->wfBuf[1][pos] = wt1;
+ }
+ }
+}
+
+extern const float WebRtcAec_weightCurve[65];
+extern const float WebRtcAec_overDriveCurve[65];
+
+static float32x4_t vpowq_f32(float32x4_t a, float32x4_t b) {
+ // a^b = exp2(b * log2(a))
+ // exp2(x) and log2(x) are calculated using polynomial approximations.
+ float32x4_t log2_a, b_log2_a, a_exp_b;
+
+ // Calculate log2(x), x = a.
+ {
+ // To calculate log2(x), we decompose x like this:
+ // x = y * 2^n
+ // n is an integer
+ // y is in the [1.0, 2.0) range
+ //
+ // log2(x) = log2(y) + n
+ // n can be evaluated by playing with float representation.
+ // log2(y) in a small range can be approximated, this code uses an order
+ // five polynomial approximation. The coefficients have been
+ // estimated with the Remez algorithm and the resulting
+ // polynomial has a maximum relative error of 0.00086%.
+
+ // Compute n.
+ // This is done by masking the exponent, shifting it into the top bit of
+ // the mantissa, putting eight into the biased exponent (to shift/
+ // compensate the fact that the exponent has been shifted in the top/
+ // fractional part and finally getting rid of the implicit leading one
+ // from the mantissa by subtracting it out.
+ const uint32x4_t vec_float_exponent_mask = vdupq_n_u32(0x7F800000);
+ const uint32x4_t vec_eight_biased_exponent = vdupq_n_u32(0x43800000);
+ const uint32x4_t vec_implicit_leading_one = vdupq_n_u32(0x43BF8000);
+ const uint32x4_t two_n = vandq_u32(vreinterpretq_u32_f32(a),
+ vec_float_exponent_mask);
+ const uint32x4_t n_1 = vshrq_n_u32(two_n, kShiftExponentIntoTopMantissa);
+ const uint32x4_t n_0 = vorrq_u32(n_1, vec_eight_biased_exponent);
+ const float32x4_t n =
+ vsubq_f32(vreinterpretq_f32_u32(n_0),
+ vreinterpretq_f32_u32(vec_implicit_leading_one));
+ // Compute y.
+ const uint32x4_t vec_mantissa_mask = vdupq_n_u32(0x007FFFFF);
+ const uint32x4_t vec_zero_biased_exponent_is_one = vdupq_n_u32(0x3F800000);
+ const uint32x4_t mantissa = vandq_u32(vreinterpretq_u32_f32(a),
+ vec_mantissa_mask);
+ const float32x4_t y =
+ vreinterpretq_f32_u32(vorrq_u32(mantissa,
+ vec_zero_biased_exponent_is_one));
+ // Approximate log2(y) ~= (y - 1) * pol5(y).
+ // pol5(y) = C5 * y^5 + C4 * y^4 + C3 * y^3 + C2 * y^2 + C1 * y + C0
+ const float32x4_t C5 = vdupq_n_f32(-3.4436006e-2f);
+ const float32x4_t C4 = vdupq_n_f32(3.1821337e-1f);
+ const float32x4_t C3 = vdupq_n_f32(-1.2315303f);
+ const float32x4_t C2 = vdupq_n_f32(2.5988452f);
+ const float32x4_t C1 = vdupq_n_f32(-3.3241990f);
+ const float32x4_t C0 = vdupq_n_f32(3.1157899f);
+ float32x4_t pol5_y = C5;
+ pol5_y = vmlaq_f32(C4, y, pol5_y);
+ pol5_y = vmlaq_f32(C3, y, pol5_y);
+ pol5_y = vmlaq_f32(C2, y, pol5_y);
+ pol5_y = vmlaq_f32(C1, y, pol5_y);
+ pol5_y = vmlaq_f32(C0, y, pol5_y);
+ const float32x4_t y_minus_one =
+ vsubq_f32(y, vreinterpretq_f32_u32(vec_zero_biased_exponent_is_one));
+ const float32x4_t log2_y = vmulq_f32(y_minus_one, pol5_y);
+
+ // Combine parts.
+ log2_a = vaddq_f32(n, log2_y);
+ }
+
+ // b * log2(a)
+ b_log2_a = vmulq_f32(b, log2_a);
+
+ // Calculate exp2(x), x = b * log2(a).
+ {
+ // To calculate 2^x, we decompose x like this:
+ // x = n + y
+ // n is an integer, the value of x - 0.5 rounded down, therefore
+ // y is in the [0.5, 1.5) range
+ //
+ // 2^x = 2^n * 2^y
+ // 2^n can be evaluated by playing with float representation.
+ // 2^y in a small range can be approximated, this code uses an order two
+ // polynomial approximation. The coefficients have been estimated
+ // with the Remez algorithm and the resulting polynomial has a
+ // maximum relative error of 0.17%.
+ // To avoid over/underflow, we reduce the range of input to ]-127, 129].
+ const float32x4_t max_input = vdupq_n_f32(129.f);
+ const float32x4_t min_input = vdupq_n_f32(-126.99999f);
+ const float32x4_t x_min = vminq_f32(b_log2_a, max_input);
+ const float32x4_t x_max = vmaxq_f32(x_min, min_input);
+ // Compute n.
+ const float32x4_t half = vdupq_n_f32(0.5f);
+ const float32x4_t x_minus_half = vsubq_f32(x_max, half);
+ const int32x4_t x_minus_half_floor = vcvtq_s32_f32(x_minus_half);
+
+ // Compute 2^n.
+ const int32x4_t float_exponent_bias = vdupq_n_s32(127);
+ const int32x4_t two_n_exponent =
+ vaddq_s32(x_minus_half_floor, float_exponent_bias);
+ const float32x4_t two_n =
+ vreinterpretq_f32_s32(vshlq_n_s32(two_n_exponent, kFloatExponentShift));
+ // Compute y.
+ const float32x4_t y = vsubq_f32(x_max, vcvtq_f32_s32(x_minus_half_floor));
+
+ // Approximate 2^y ~= C2 * y^2 + C1 * y + C0.
+ const float32x4_t C2 = vdupq_n_f32(3.3718944e-1f);
+ const float32x4_t C1 = vdupq_n_f32(6.5763628e-1f);
+ const float32x4_t C0 = vdupq_n_f32(1.0017247f);
+ float32x4_t exp2_y = C2;
+ exp2_y = vmlaq_f32(C1, y, exp2_y);
+ exp2_y = vmlaq_f32(C0, y, exp2_y);
+
+ // Combine parts.
+ a_exp_b = vmulq_f32(exp2_y, two_n);
+ }
+
+ return a_exp_b;
+}
+
+static void OverdriveAndSuppressNEON(AecCore* aec,
+ float hNl[PART_LEN1],
+ const float hNlFb,
+ float efw[2][PART_LEN1]) {
+ int i;
+ const float32x4_t vec_hNlFb = vmovq_n_f32(hNlFb);
+ const float32x4_t vec_one = vdupq_n_f32(1.0f);
+ const float32x4_t vec_minus_one = vdupq_n_f32(-1.0f);
+ const float32x4_t vec_overDriveSm = vmovq_n_f32(aec->overDriveSm);
+
+ // vectorized code (four at once)
+ for (i = 0; i + 3 < PART_LEN1; i += 4) {
+ // Weight subbands
+ float32x4_t vec_hNl = vld1q_f32(&hNl[i]);
+ const float32x4_t vec_weightCurve = vld1q_f32(&WebRtcAec_weightCurve[i]);
+ const uint32x4_t bigger = vcgtq_f32(vec_hNl, vec_hNlFb);
+ const float32x4_t vec_weightCurve_hNlFb = vmulq_f32(vec_weightCurve,
+ vec_hNlFb);
+ const float32x4_t vec_one_weightCurve = vsubq_f32(vec_one, vec_weightCurve);
+ const float32x4_t vec_one_weightCurve_hNl = vmulq_f32(vec_one_weightCurve,
+ vec_hNl);
+ const uint32x4_t vec_if0 = vandq_u32(vmvnq_u32(bigger),
+ vreinterpretq_u32_f32(vec_hNl));
+ const float32x4_t vec_one_weightCurve_add =
+ vaddq_f32(vec_weightCurve_hNlFb, vec_one_weightCurve_hNl);
+ const uint32x4_t vec_if1 =
+ vandq_u32(bigger, vreinterpretq_u32_f32(vec_one_weightCurve_add));
+
+ vec_hNl = vreinterpretq_f32_u32(vorrq_u32(vec_if0, vec_if1));
+
+ {
+ const float32x4_t vec_overDriveCurve =
+ vld1q_f32(&WebRtcAec_overDriveCurve[i]);
+ const float32x4_t vec_overDriveSm_overDriveCurve =
+ vmulq_f32(vec_overDriveSm, vec_overDriveCurve);
+ vec_hNl = vpowq_f32(vec_hNl, vec_overDriveSm_overDriveCurve);
+ vst1q_f32(&hNl[i], vec_hNl);
+ }
+
+ // Suppress error signal
+ {
+ float32x4_t vec_efw_re = vld1q_f32(&efw[0][i]);
+ float32x4_t vec_efw_im = vld1q_f32(&efw[1][i]);
+ vec_efw_re = vmulq_f32(vec_efw_re, vec_hNl);
+ vec_efw_im = vmulq_f32(vec_efw_im, vec_hNl);
+
+ // Ooura fft returns incorrect sign on imaginary component. It matters
+ // here because we are making an additive change with comfort noise.
+ vec_efw_im = vmulq_f32(vec_efw_im, vec_minus_one);
+ vst1q_f32(&efw[0][i], vec_efw_re);
+ vst1q_f32(&efw[1][i], vec_efw_im);
+ }
+ }
+
+ // scalar code for the remaining items.
+ for (; i < PART_LEN1; i++) {
+ // Weight subbands
+ if (hNl[i] > hNlFb) {
+ hNl[i] = WebRtcAec_weightCurve[i] * hNlFb +
+ (1 - WebRtcAec_weightCurve[i]) * hNl[i];
+ }
+
+ hNl[i] = powf(hNl[i], aec->overDriveSm * WebRtcAec_overDriveCurve[i]);
+
+ // Suppress error signal
+ efw[0][i] *= hNl[i];
+ efw[1][i] *= hNl[i];
+
+ // Ooura fft returns incorrect sign on imaginary component. It matters
+ // here because we are making an additive change with comfort noise.
+ efw[1][i] *= -1;
+ }
+}
+
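The first half of the loop above is the scalar branch "if (hNl[i] > hNlFb) reweight" rewritten branch-free: vcgtq_f32 produces an all-ones or all-zero mask per lane, and the AND / NOT-AND / OR triple then routes either the untouched hNl or the reweighted sum through. One lane of that idiom in plain C (a sketch, not project code):

    #include <stdint.h>
    #include <string.h>

    // Single-lane model of the vcgtq/vmvnq/vandq/vorrq select above.
    static float select_by_mask(int cond, float if_true, float if_false) {
      const uint32_t mask = cond ? 0xFFFFFFFFu : 0u;  // vcgtq_f32, one lane.
      uint32_t t, f, r;
      memcpy(&t, &if_true, sizeof(t));   // vreinterpretq_u32_f32.
      memcpy(&f, &if_false, sizeof(f));
      r = (mask & t) | (~mask & f);      // vandq_u32 / vmvnq_u32 / vorrq_u32.
      float result;
      memcpy(&result, &r, sizeof(result));
      return result;
    }

NEON's vbslq_f32 would fold the select into a single instruction; spelling the mask out keeps the structure parallel to the SSE2 version of this routine, which has no one-instruction select.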
+void WebRtcAec_InitAec_neon(void) {
+ WebRtcAec_FilterAdaptation = FilterAdaptationNEON;
+ WebRtcAec_OverdriveAndSuppress = OverdriveAndSuppressNEON;
+}
+
diff --git a/chromium/third_party/webrtc/modules/audio_processing/aec/aec_rdft.c b/chromium/third_party/webrtc/modules/audio_processing/aec/aec_rdft.c
index a19e8877bbc..7731b37b224 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/aec/aec_rdft.c
+++ b/chromium/third_party/webrtc/modules/audio_processing/aec/aec_rdft.c
@@ -116,7 +116,7 @@ static void bitrv2_32(int* ip, float* a) {
}
}
-static void bitrv2_128(float* a) {
+static void bitrv2_128_C(float* a) {
/*
 The following things have been attempted but are no faster:
(a) Storing the swap indexes in a LUT (index calculations are done
@@ -512,7 +512,7 @@ static void cftmdl_128_C(float* a) {
}
}
-static void cftfsub_128(float* a) {
+static void cftfsub_128_C(float* a) {
int j, j1, j2, j3, l;
float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i;
@@ -542,7 +542,7 @@ static void cftfsub_128(float* a) {
}
}
-static void cftbsub_128(float* a) {
+static void cftbsub_128_C(float* a) {
int j, j1, j2, j3, l;
float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i;
@@ -640,17 +640,26 @@ rft_sub_128_t cft1st_128;
rft_sub_128_t cftmdl_128;
rft_sub_128_t rftfsub_128;
rft_sub_128_t rftbsub_128;
+rft_sub_128_t cftfsub_128;
+rft_sub_128_t cftbsub_128;
+rft_sub_128_t bitrv2_128;
void aec_rdft_init(void) {
cft1st_128 = cft1st_128_C;
cftmdl_128 = cftmdl_128_C;
rftfsub_128 = rftfsub_128_C;
rftbsub_128 = rftbsub_128_C;
+ cftfsub_128 = cftfsub_128_C;
+ cftbsub_128 = cftbsub_128_C;
+ bitrv2_128 = bitrv2_128_C;
#if defined(WEBRTC_ARCH_X86_FAMILY)
if (WebRtc_GetCPUInfo(kSSE2)) {
aec_rdft_init_sse2();
}
#endif
+#if defined(MIPS_FPU_LE)
+ aec_rdft_init_mips();
+#endif
// init library constants.
makewt_32();
makect_32();
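The shape of this change: every FFT stage is dispatched through a global function pointer that aec_rdft_init() first binds to the portable _C routine and that a platform initializer may then rebind. SSE2 overrides only a subset, which is why cftfsub_128, cftbsub_128 and bitrv2_128 could previously stay plain static functions; the MIPS path added below replaces all seven stages, so they are promoted to pointers here. Reduced to one stage, the pattern is (sketch only; the real declarations are in aec_rdft.h below):

    typedef void (*rft_sub_128_t)(float* a);

    rft_sub_128_t bitrv2_128;            // Bound at init time.

    static void bitrv2_128_C(float* a) { /* portable fallback */ }

    void aec_rdft_init(void) {
      bitrv2_128 = bitrv2_128_C;         // Default to the C path...
    #if defined(MIPS_FPU_LE)
      aec_rdft_init_mips();              // ...then let MIPS rebind to *_mips.
    #endif
    }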
diff --git a/chromium/third_party/webrtc/modules/audio_processing/aec/aec_rdft.h b/chromium/third_party/webrtc/modules/audio_processing/aec/aec_rdft.h
index 3380633ce6c..795c57d44c2 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/aec/aec_rdft.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/aec/aec_rdft.h
@@ -33,13 +33,13 @@ extern float rdft_w[64];
extern float rdft_wk3ri_first[32];
extern float rdft_wk3ri_second[32];
// constants used by SSE2 but initialized in C path.
-extern float rdft_wk1r[32];
-extern float rdft_wk2r[32];
-extern float rdft_wk3r[32];
-extern float rdft_wk1i[32];
-extern float rdft_wk2i[32];
-extern float rdft_wk3i[32];
-extern float cftmdl_wk1r[4];
+extern ALIGN16_BEG float ALIGN16_END rdft_wk1r[32];
+extern ALIGN16_BEG float ALIGN16_END rdft_wk2r[32];
+extern ALIGN16_BEG float ALIGN16_END rdft_wk3r[32];
+extern ALIGN16_BEG float ALIGN16_END rdft_wk1i[32];
+extern ALIGN16_BEG float ALIGN16_END rdft_wk2i[32];
+extern ALIGN16_BEG float ALIGN16_END rdft_wk3i[32];
+extern ALIGN16_BEG float ALIGN16_END cftmdl_wk1r[4];
// code path selection function pointers
typedef void (*rft_sub_128_t)(float* a);
@@ -47,6 +47,9 @@ extern rft_sub_128_t rftfsub_128;
extern rft_sub_128_t rftbsub_128;
extern rft_sub_128_t cft1st_128;
extern rft_sub_128_t cftmdl_128;
+extern rft_sub_128_t cftfsub_128;
+extern rft_sub_128_t cftbsub_128;
+extern rft_sub_128_t bitrv2_128;
// entry points
void aec_rdft_init(void);
@@ -54,4 +57,8 @@ void aec_rdft_init_sse2(void);
void aec_rdft_forward_128(float* a);
void aec_rdft_inverse_128(float* a);
+#if defined(MIPS_FPU_LE)
+void aec_rdft_init_mips(void);
+#endif
+
#endif // WEBRTC_MODULES_AUDIO_PROCESSING_AEC_MAIN_SOURCE_AEC_RDFT_H_
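The ALIGN16_BEG/ALIGN16_END pair papers over the fact that MSVC and gcc attach alignment attributes on opposite sides of a declaration; adding the macros to these extern declarations keeps every translation unit agreeing that the twiddle tables are 16-byte aligned, which the SSE2 path relies on for aligned loads. The pair is defined near the top of this same header (outside the hunk); its usual expansion looks like:

    #if defined(_MSC_VER)                       /* Visual C++ */
    #define ALIGN16_BEG __declspec(align(16))
    #define ALIGN16_END
    #else                                       /* gcc / clang */
    #define ALIGN16_BEG
    #define ALIGN16_END __attribute__((aligned(16)))
    #endif

    /* As used above: */
    extern ALIGN16_BEG float ALIGN16_END rdft_wk1r[32];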
diff --git a/chromium/third_party/webrtc/modules/audio_processing/aec/aec_rdft_mips.c b/chromium/third_party/webrtc/modules/audio_processing/aec/aec_rdft_mips.c
new file mode 100644
index 00000000000..a0dac5f135c
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_processing/aec/aec_rdft_mips.c
@@ -0,0 +1,1213 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_processing/aec/aec_rdft.h"
+#include "webrtc/typedefs.h"
+
+static void bitrv2_128_mips(float *a) {
+ // n is 128
+ float xr, xi, yr, yi;
+
+ xr = a[8];
+ xi = a[9];
+ yr = a[16];
+ yi = a[17];
+ a[8] = yr;
+ a[9] = yi;
+ a[16] = xr;
+ a[17] = xi;
+
+ xr = a[64];
+ xi = a[65];
+ yr = a[2];
+ yi = a[3];
+ a[64] = yr;
+ a[65] = yi;
+ a[2] = xr;
+ a[3] = xi;
+
+ xr = a[72];
+ xi = a[73];
+ yr = a[18];
+ yi = a[19];
+ a[72] = yr;
+ a[73] = yi;
+ a[18] = xr;
+ a[19] = xi;
+
+ xr = a[80];
+ xi = a[81];
+ yr = a[10];
+ yi = a[11];
+ a[80] = yr;
+ a[81] = yi;
+ a[10] = xr;
+ a[11] = xi;
+
+ xr = a[88];
+ xi = a[89];
+ yr = a[26];
+ yi = a[27];
+ a[88] = yr;
+ a[89] = yi;
+ a[26] = xr;
+ a[27] = xi;
+
+ xr = a[74];
+ xi = a[75];
+ yr = a[82];
+ yi = a[83];
+ a[74] = yr;
+ a[75] = yi;
+ a[82] = xr;
+ a[83] = xi;
+
+ xr = a[32];
+ xi = a[33];
+ yr = a[4];
+ yi = a[5];
+ a[32] = yr;
+ a[33] = yi;
+ a[4] = xr;
+ a[5] = xi;
+
+ xr = a[40];
+ xi = a[41];
+ yr = a[20];
+ yi = a[21];
+ a[40] = yr;
+ a[41] = yi;
+ a[20] = xr;
+ a[21] = xi;
+
+ xr = a[48];
+ xi = a[49];
+ yr = a[12];
+ yi = a[13];
+ a[48] = yr;
+ a[49] = yi;
+ a[12] = xr;
+ a[13] = xi;
+
+ xr = a[56];
+ xi = a[57];
+ yr = a[28];
+ yi = a[29];
+ a[56] = yr;
+ a[57] = yi;
+ a[28] = xr;
+ a[29] = xi;
+
+ xr = a[34];
+ xi = a[35];
+ yr = a[68];
+ yi = a[69];
+ a[34] = yr;
+ a[35] = yi;
+ a[68] = xr;
+ a[69] = xi;
+
+ xr = a[42];
+ xi = a[43];
+ yr = a[84];
+ yi = a[85];
+ a[42] = yr;
+ a[43] = yi;
+ a[84] = xr;
+ a[85] = xi;
+
+ xr = a[50];
+ xi = a[51];
+ yr = a[76];
+ yi = a[77];
+ a[50] = yr;
+ a[51] = yi;
+ a[76] = xr;
+ a[77] = xi;
+
+ xr = a[58];
+ xi = a[59];
+ yr = a[92];
+ yi = a[93];
+ a[58] = yr;
+ a[59] = yi;
+ a[92] = xr;
+ a[93] = xi;
+
+ xr = a[44];
+ xi = a[45];
+ yr = a[52];
+ yi = a[53];
+ a[44] = yr;
+ a[45] = yi;
+ a[52] = xr;
+ a[53] = xi;
+
+ xr = a[96];
+ xi = a[97];
+ yr = a[6];
+ yi = a[7];
+ a[96] = yr;
+ a[97] = yi;
+ a[6] = xr;
+ a[7] = xi;
+
+ xr = a[104];
+ xi = a[105];
+ yr = a[22];
+ yi = a[23];
+ a[104] = yr;
+ a[105] = yi;
+ a[22] = xr;
+ a[23] = xi;
+
+ xr = a[112];
+ xi = a[113];
+ yr = a[14];
+ yi = a[15];
+ a[112] = yr;
+ a[113] = yi;
+ a[14] = xr;
+ a[15] = xi;
+
+ xr = a[120];
+ xi = a[121];
+ yr = a[30];
+ yi = a[31];
+ a[120] = yr;
+ a[121] = yi;
+ a[30] = xr;
+ a[31] = xi;
+
+ xr = a[98];
+ xi = a[99];
+ yr = a[70];
+ yi = a[71];
+ a[98] = yr;
+ a[99] = yi;
+ a[70] = xr;
+ a[71] = xi;
+
+ xr = a[106];
+ xi = a[107];
+ yr = a[86];
+ yi = a[87];
+ a[106] = yr;
+ a[107] = yi;
+ a[86] = xr;
+ a[87] = xi;
+
+ xr = a[114];
+ xi = a[115];
+ yr = a[78];
+ yi = a[79];
+ a[114] = yr;
+ a[115] = yi;
+ a[78] = xr;
+ a[79] = xi;
+
+ xr = a[122];
+ xi = a[123];
+ yr = a[94];
+ yi = a[95];
+ a[122] = yr;
+ a[123] = yi;
+ a[94] = xr;
+ a[95] = xi;
+
+ xr = a[100];
+ xi = a[101];
+ yr = a[38];
+ yi = a[39];
+ a[100] = yr;
+ a[101] = yi;
+ a[38] = xr;
+ a[39] = xi;
+
+ xr = a[108];
+ xi = a[109];
+ yr = a[54];
+ yi = a[55];
+ a[108] = yr;
+ a[109] = yi;
+ a[54] = xr;
+ a[55] = xi;
+
+ xr = a[116];
+ xi = a[117];
+ yr = a[46];
+ yi = a[47];
+ a[116] = yr;
+ a[117] = yi;
+ a[46] = xr;
+ a[47] = xi;
+
+ xr = a[124];
+ xi = a[125];
+ yr = a[62];
+ yi = a[63];
+ a[124] = yr;
+ a[125] = yi;
+ a[62] = xr;
+ a[63] = xi;
+
+ xr = a[110];
+ xi = a[111];
+ yr = a[118];
+ yi = a[119];
+ a[110] = yr;
+ a[111] = yi;
+ a[118] = xr;
+ a[119] = xi;
+}
+
+static void cft1st_128_mips(float *a) {
+ float wk1r, wk1i, wk2r, wk2i, wk3r, wk3i;
+ float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i;
+
+ float f0, f1, f2, f3, f4, f5, f6, f7;
+ int a_ptr, p1_rdft, p2_rdft, count;
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ // first 16
+ "lwc1 %[f0], 0(%[a]) \n\t"
+ "lwc1 %[f1], 4(%[a]) \n\t"
+ "lwc1 %[f2], 8(%[a]) \n\t"
+ "lwc1 %[f3], 12(%[a]) \n\t"
+ "lwc1 %[f4], 16(%[a]) \n\t"
+ "lwc1 %[f5], 20(%[a]) \n\t"
+ "lwc1 %[f6], 24(%[a]) \n\t"
+ "lwc1 %[f7], 28(%[a]) \n\t"
+ "add.s %[x0r], %[f0], %[f2] \n\t"
+ "add.s %[x0i], %[f1], %[f3] \n\t"
+ "sub.s %[x1r], %[f0], %[f2] \n\t"
+ "add.s %[x2r], %[f4], %[f6] \n\t"
+ "add.s %[x2i], %[f5], %[f7] \n\t"
+ "sub.s %[x1i], %[f1], %[f3] \n\t"
+ "sub.s %[x3r], %[f4], %[f6] \n\t"
+ "sub.s %[x3i], %[f5], %[f7] \n\t"
+ "add.s %[f0], %[x0r], %[x2r] \n\t"
+ "add.s %[f1], %[x0i], %[x2i] \n\t"
+ "sub.s %[f4], %[x0r], %[x2r] \n\t"
+ "sub.s %[f5], %[x0i], %[x2i] \n\t"
+ "sub.s %[f2], %[x1r], %[x3i] \n\t"
+ "add.s %[f3], %[x1i], %[x3r] \n\t"
+ "add.s %[f6], %[x1r], %[x3i] \n\t"
+ "sub.s %[f7], %[x1i], %[x3r] \n\t"
+ "swc1 %[f0], 0(%[a]) \n\t"
+ "swc1 %[f1], 4(%[a]) \n\t"
+ "swc1 %[f2], 8(%[a]) \n\t"
+ "swc1 %[f3], 12(%[a]) \n\t"
+ "swc1 %[f4], 16(%[a]) \n\t"
+ "swc1 %[f5], 20(%[a]) \n\t"
+ "swc1 %[f6], 24(%[a]) \n\t"
+ "swc1 %[f7], 28(%[a]) \n\t"
+ "lwc1 %[f0], 32(%[a]) \n\t"
+ "lwc1 %[f1], 36(%[a]) \n\t"
+ "lwc1 %[f2], 40(%[a]) \n\t"
+ "lwc1 %[f3], 44(%[a]) \n\t"
+ "lwc1 %[f4], 48(%[a]) \n\t"
+ "lwc1 %[f5], 52(%[a]) \n\t"
+ "lwc1 %[f6], 56(%[a]) \n\t"
+ "lwc1 %[f7], 60(%[a]) \n\t"
+ "add.s %[x0r], %[f0], %[f2] \n\t"
+ "add.s %[x0i], %[f1], %[f3] \n\t"
+ "sub.s %[x1r], %[f0], %[f2] \n\t"
+ "sub.s %[x1i], %[f1], %[f3] \n\t"
+ "sub.s %[x3r], %[f4], %[f6] \n\t"
+ "sub.s %[x3i], %[f5], %[f7] \n\t"
+ "add.s %[x2r], %[f4], %[f6] \n\t"
+ "add.s %[x2i], %[f5], %[f7] \n\t"
+ "lwc1 %[wk2r], 8(%[rdft_w]) \n\t"
+ "add.s %[f3], %[x1i], %[x3r] \n\t"
+ "sub.s %[f2], %[x1r], %[x3i] \n\t"
+ "add.s %[f6], %[x3i], %[x1r] \n\t"
+ "sub.s %[f7], %[x3r], %[x1i] \n\t"
+ "add.s %[f0], %[x0r], %[x2r] \n\t"
+ "add.s %[f1], %[x0i], %[x2i] \n\t"
+ "sub.s %[x1r], %[f2], %[f3] \n\t"
+ "add.s %[x1i], %[f3], %[f2] \n\t"
+ "sub.s %[x3r], %[f7], %[f6] \n\t"
+ "add.s %[x3i], %[f7], %[f6] \n\t"
+ "sub.s %[f4], %[x0r], %[x2r] \n\t"
+ "mul.s %[f2], %[wk2r], %[x1r] \n\t"
+ "mul.s %[f3], %[wk2r], %[x1i] \n\t"
+ "mul.s %[f6], %[wk2r], %[x3r] \n\t"
+ "mul.s %[f7], %[wk2r], %[x3i] \n\t"
+ "sub.s %[f5], %[x2i], %[x0i] \n\t"
+ "swc1 %[f0], 32(%[a]) \n\t"
+ "swc1 %[f1], 36(%[a]) \n\t"
+ "swc1 %[f2], 40(%[a]) \n\t"
+ "swc1 %[f3], 44(%[a]) \n\t"
+ "swc1 %[f5], 48(%[a]) \n\t"
+ "swc1 %[f4], 52(%[a]) \n\t"
+ "swc1 %[f6], 56(%[a]) \n\t"
+ "swc1 %[f7], 60(%[a]) \n\t"
+ // prepare for loop
+ "addiu %[a_ptr], %[a], 64 \n\t"
+ "addiu %[p1_rdft], %[rdft_w], 8 \n\t"
+ "addiu %[p2_rdft], %[rdft_w], 16 \n\t"
+ "addiu %[count], $zero, 7 \n\t"
+ // loop
+ "1: \n\t"
+ "lwc1 %[f0], 0(%[a_ptr]) \n\t"
+ "lwc1 %[f1], 4(%[a_ptr]) \n\t"
+ "lwc1 %[f2], 8(%[a_ptr]) \n\t"
+ "lwc1 %[f3], 12(%[a_ptr]) \n\t"
+ "lwc1 %[f4], 16(%[a_ptr]) \n\t"
+ "lwc1 %[f5], 20(%[a_ptr]) \n\t"
+ "lwc1 %[f6], 24(%[a_ptr]) \n\t"
+ "lwc1 %[f7], 28(%[a_ptr]) \n\t"
+ "add.s %[x0r], %[f0], %[f2] \n\t"
+ "add.s %[x2r], %[f4], %[f6] \n\t"
+ "add.s %[x0i], %[f1], %[f3] \n\t"
+ "add.s %[x2i], %[f5], %[f7] \n\t"
+ "sub.s %[x1r], %[f0], %[f2] \n\t"
+ "sub.s %[x1i], %[f1], %[f3] \n\t"
+ "sub.s %[x3r], %[f4], %[f6] \n\t"
+ "sub.s %[x3i], %[f5], %[f7] \n\t"
+ "lwc1 %[wk2i], 4(%[p1_rdft]) \n\t"
+ "sub.s %[f0], %[x0r], %[x2r] \n\t"
+ "sub.s %[f1], %[x0i], %[x2i] \n\t"
+ "add.s %[f2], %[x1i], %[x3r] \n\t"
+ "sub.s %[f3], %[x1r], %[x3i] \n\t"
+ "lwc1 %[wk1r], 0(%[p2_rdft]) \n\t"
+ "add.s %[f4], %[x1r], %[x3i] \n\t"
+ "sub.s %[f5], %[x1i], %[x3r] \n\t"
+ "lwc1 %[wk3r], 8(%[first]) \n\t"
+ "mul.s %[x3r], %[wk2r], %[f0] \n\t"
+ "mul.s %[x3i], %[wk2r], %[f1] \n\t"
+ "mul.s %[x1r], %[wk1r], %[f3] \n\t"
+ "mul.s %[x1i], %[wk1r], %[f2] \n\t"
+ "lwc1 %[wk1i], 4(%[p2_rdft]) \n\t"
+ "mul.s %[f6], %[wk3r], %[f4] \n\t"
+ "mul.s %[f7], %[wk3r], %[f5] \n\t"
+ "lwc1 %[wk3i], 12(%[first]) \n\t"
+#if !defined(MIPS32_R2_LE)
+ "mul.s %[wk1r], %[wk2i], %[f1] \n\t"
+ "mul.s %[f0], %[wk2i], %[f0] \n\t"
+ "sub.s %[x3r], %[x3r], %[wk1r] \n\t"
+ "add.s %[x3i], %[x3i], %[f0] \n\t"
+ "add.s %[f0], %[x0r], %[x2r] \n\t"
+ "add.s %[f1], %[x0i], %[x2i] \n\t"
+ "mul.s %[x0r], %[wk1i], %[f2] \n\t"
+ "mul.s %[f3], %[wk1i], %[f3] \n\t"
+ "mul.s %[x2r], %[wk3i], %[f5] \n\t"
+ "mul.s %[f4], %[wk3i], %[f4] \n\t"
+ "sub.s %[x1r], %[x1r], %[x0r] \n\t"
+ "add.s %[x1i], %[x1i], %[f3] \n\t"
+ "sub.s %[f6], %[f6], %[x2r] \n\t"
+ "add.s %[f7], %[f7], %[f4] \n\t"
+#else // #if !defined(MIPS32_R2_LE)
+ "nmsub.s %[x3r], %[x3r], %[wk2i], %[f1] \n\t"
+ "madd.s %[x3i], %[x3i], %[wk2i], %[f0] \n\t"
+ "add.s %[f0], %[x0r], %[x2r] \n\t"
+ "add.s %[f1], %[x0i], %[x2i] \n\t"
+ "nmsub.s %[x1r], %[x1r], %[wk1i], %[f2] \n\t"
+ "madd.s %[x1i], %[x1i], %[wk1i], %[f3] \n\t"
+ "nmsub.s %[f6], %[f6], %[wk3i], %[f5] \n\t"
+ "madd.s %[f7], %[f7], %[wk3i], %[f4] \n\t"
+#endif // #if !defined(MIPS32_R2_LE)
+ "swc1 %[f0], 0(%[a_ptr]) \n\t"
+ "swc1 %[f1], 4(%[a_ptr]) \n\t"
+ "swc1 %[x1r], 8(%[a_ptr]) \n\t"
+ "swc1 %[x1i], 12(%[a_ptr]) \n\t"
+ "swc1 %[x3r], 16(%[a_ptr]) \n\t"
+ "swc1 %[x3i], 20(%[a_ptr]) \n\t"
+ "swc1 %[f6], 24(%[a_ptr]) \n\t"
+ "swc1 %[f7], 28(%[a_ptr]) \n\t"
+ "lwc1 %[f0], 32(%[a_ptr]) \n\t"
+ "lwc1 %[f1], 36(%[a_ptr]) \n\t"
+ "lwc1 %[f2], 40(%[a_ptr]) \n\t"
+ "lwc1 %[f3], 44(%[a_ptr]) \n\t"
+ "lwc1 %[f4], 48(%[a_ptr]) \n\t"
+ "lwc1 %[f5], 52(%[a_ptr]) \n\t"
+ "lwc1 %[f6], 56(%[a_ptr]) \n\t"
+ "lwc1 %[f7], 60(%[a_ptr]) \n\t"
+ "add.s %[x0r], %[f0], %[f2] \n\t"
+ "add.s %[x2r], %[f4], %[f6] \n\t"
+ "add.s %[x0i], %[f1], %[f3] \n\t"
+ "add.s %[x2i], %[f5], %[f7] \n\t"
+ "sub.s %[x1r], %[f0], %[f2] \n\t"
+ "sub.s %[x1i], %[f1], %[f3] \n\t"
+ "sub.s %[x3r], %[f4], %[f6] \n\t"
+ "sub.s %[x3i], %[f5], %[f7] \n\t"
+ "lwc1 %[wk1r], 8(%[p2_rdft]) \n\t"
+ "sub.s %[f0], %[x0r], %[x2r] \n\t"
+ "sub.s %[f1], %[x0i], %[x2i] \n\t"
+ "add.s %[f2], %[x1i], %[x3r] \n\t"
+ "sub.s %[f3], %[x1r], %[x3i] \n\t"
+ "add.s %[f4], %[x1r], %[x3i] \n\t"
+ "sub.s %[f5], %[x1i], %[x3r] \n\t"
+ "lwc1 %[wk3r], 8(%[second]) \n\t"
+ "mul.s %[x3r], %[wk2i], %[f0] \n\t"
+ "mul.s %[x3i], %[wk2i], %[f1] \n\t"
+ "mul.s %[x1r], %[wk1r], %[f3] \n\t"
+ "mul.s %[x1i], %[wk1r], %[f2] \n\t"
+ "mul.s %[f6], %[wk3r], %[f4] \n\t"
+ "mul.s %[f7], %[wk3r], %[f5] \n\t"
+ "lwc1 %[wk1i], 12(%[p2_rdft]) \n\t"
+ "lwc1 %[wk3i], 12(%[second]) \n\t"
+#if !defined(MIPS32_R2_LE)
+ "mul.s %[wk1r], %[wk2r], %[f1] \n\t"
+ "mul.s %[f0], %[wk2r], %[f0] \n\t"
+ "add.s %[x3r], %[x3r], %[wk1r] \n\t"
+ "neg.s %[x3r], %[x3r] \n\t"
+ "sub.s %[x3i], %[f0], %[x3i] \n\t"
+ "add.s %[f0], %[x0r], %[x2r] \n\t"
+ "add.s %[f1], %[x0i], %[x2i] \n\t"
+ "mul.s %[x0r], %[wk1i], %[f2] \n\t"
+ "mul.s %[f3], %[wk1i], %[f3] \n\t"
+ "mul.s %[x2r], %[wk3i], %[f5] \n\t"
+ "mul.s %[f4], %[wk3i], %[f4] \n\t"
+ "sub.s %[x1r], %[x1r], %[x0r] \n\t"
+ "add.s %[x1i], %[x1i], %[f3] \n\t"
+ "sub.s %[f6], %[f6], %[x2r] \n\t"
+ "add.s %[f7], %[f7], %[f4] \n\t"
+#else // #if !defined(MIPS32_R2_LE)
+ "nmadd.s %[x3r], %[x3r], %[wk2r], %[f1] \n\t"
+ "msub.s %[x3i], %[x3i], %[wk2r], %[f0] \n\t"
+ "add.s %[f0], %[x0r], %[x2r] \n\t"
+ "add.s %[f1], %[x0i], %[x2i] \n\t"
+ "nmsub.s %[x1r], %[x1r], %[wk1i], %[f2] \n\t"
+ "madd.s %[x1i], %[x1i], %[wk1i], %[f3] \n\t"
+ "nmsub.s %[f6], %[f6], %[wk3i], %[f5] \n\t"
+ "madd.s %[f7], %[f7], %[wk3i], %[f4] \n\t"
+#endif // #if !defined(MIPS32_R2_LE)
+ "addiu %[count], %[count], -1 \n\t"
+ "lwc1 %[wk2r], 8(%[p1_rdft]) \n\t"
+ "addiu %[a_ptr], %[a_ptr], 64 \n\t"
+ "addiu %[p1_rdft], %[p1_rdft], 8 \n\t"
+ "addiu %[p2_rdft], %[p2_rdft], 16 \n\t"
+ "addiu %[first], %[first], 8 \n\t"
+ "swc1 %[f0], -32(%[a_ptr]) \n\t"
+ "swc1 %[f1], -28(%[a_ptr]) \n\t"
+ "swc1 %[x1r], -24(%[a_ptr]) \n\t"
+ "swc1 %[x1i], -20(%[a_ptr]) \n\t"
+ "swc1 %[x3r], -16(%[a_ptr]) \n\t"
+ "swc1 %[x3i], -12(%[a_ptr]) \n\t"
+ "swc1 %[f6], -8(%[a_ptr]) \n\t"
+ "swc1 %[f7], -4(%[a_ptr]) \n\t"
+ "bgtz %[count], 1b \n\t"
+ " addiu %[second], %[second], 8 \n\t"
+ ".set pop \n\t"
+ : [f0] "=&f" (f0), [f1] "=&f" (f1), [f2] "=&f" (f2), [f3] "=&f" (f3),
+ [f4] "=&f" (f4), [f5] "=&f" (f5), [f6] "=&f" (f6), [f7] "=&f" (f7),
+ [x0r] "=&f" (x0r), [x0i] "=&f" (x0i), [x1r] "=&f" (x1r),
+ [x1i] "=&f" (x1i), [x2r] "=&f" (x2r), [x2i] "=&f" (x2i),
+ [x3r] "=&f" (x3r), [x3i] "=&f" (x3i), [wk1r] "=&f" (wk1r),
+ [wk1i] "=&f" (wk1i), [wk2r] "=&f" (wk2r), [wk2i] "=&f" (wk2i),
+ [wk3r] "=&f" (wk3r), [wk3i] "=&f" (wk3i), [a_ptr] "=&r" (a_ptr),
+ [p1_rdft] "=&r" (p1_rdft), [p2_rdft] "=&r" (p2_rdft),
+ [count] "=&r" (count)
+ : [a] "r" (a), [rdft_w] "r" (rdft_w), [first] "r" (rdft_wk3ri_first),
+ [second] "r" (rdft_wk3ri_second)
+ : "memory"
+ );
+}
+
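Two MIPS conventions recur throughout this file and are worth decoding once. First, every #if !defined(MIPS32_R2_LE) block pairs a plain multiply-then-add/subtract fallback with the fused single-precision multiply-add family that MIPS32R2 adds; in C terms (operands listed in instruction order fd, fr, fs, ft):

    /* What the fused MIPS32R2 ops compute (C equivalents, sketch):
     *   madd.s  fd, fr, fs, ft   =>  fd =   fs * ft + fr
     *   msub.s  fd, fr, fs, ft   =>  fd =   fs * ft - fr
     *   nmadd.s fd, fr, fs, ft   =>  fd = -(fs * ft + fr)
     *   nmsub.s fd, fr, fs, ft   =>  fd = -(fs * ft - fr)  ==  fr - fs * ft
     *
     * So "nmsub.s %[x3r], %[x3r], %[wk2i], %[f1]" is x3r -= wk2i * f1,
     * matching the two-instruction mul.s/sub.s sequence in the pre-R2
     * branch above it.
     */

Second, because of .set noreorder, the single instruction after each bgtz/bne sits in the branch delay slot and executes whether or not the branch is taken; the extra leading space inside those strings is the conventional marker for a delay-slot instruction.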
+static void cftmdl_128_mips(float *a) {
+ float wk1r, wk1i, wk2r, wk2i, wk3r, wk3i;
+ float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i;
+ float f0, f1, f2, f3, f4, f5, f6, f7;
+ int tmp_a, count;
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[tmp_a], %[a], 0 \n\t"
+ "addiu %[count], $zero, 4 \n\t"
+ "1: \n\t"
+ "addiu %[count], %[count], -1 \n\t"
+ "lwc1 %[f0], 0(%[tmp_a]) \n\t"
+ "lwc1 %[f1], 4(%[tmp_a]) \n\t"
+ "lwc1 %[f2], 32(%[tmp_a]) \n\t"
+ "lwc1 %[f3], 36(%[tmp_a]) \n\t"
+ "lwc1 %[f4], 64(%[tmp_a]) \n\t"
+ "lwc1 %[f5], 68(%[tmp_a]) \n\t"
+ "lwc1 %[f6], 96(%[tmp_a]) \n\t"
+ "lwc1 %[f7], 100(%[tmp_a]) \n\t"
+ "add.s %[x0r], %[f0], %[f2] \n\t"
+ "add.s %[x0i], %[f1], %[f3] \n\t"
+ "add.s %[x2r], %[f4], %[f6] \n\t"
+ "add.s %[x2i], %[f5], %[f7] \n\t"
+ "sub.s %[x1r], %[f0], %[f2] \n\t"
+ "sub.s %[x1i], %[f1], %[f3] \n\t"
+ "sub.s %[x3r], %[f4], %[f6] \n\t"
+ "sub.s %[x3i], %[f5], %[f7] \n\t"
+ "add.s %[f0], %[x0r], %[x2r] \n\t"
+ "add.s %[f1], %[x0i], %[x2i] \n\t"
+ "sub.s %[f4], %[x0r], %[x2r] \n\t"
+ "sub.s %[f5], %[x0i], %[x2i] \n\t"
+ "sub.s %[f2], %[x1r], %[x3i] \n\t"
+ "add.s %[f3], %[x1i], %[x3r] \n\t"
+ "add.s %[f6], %[x1r], %[x3i] \n\t"
+ "sub.s %[f7], %[x1i], %[x3r] \n\t"
+ "swc1 %[f0], 0(%[tmp_a]) \n\t"
+ "swc1 %[f1], 4(%[tmp_a]) \n\t"
+ "swc1 %[f2], 32(%[tmp_a]) \n\t"
+ "swc1 %[f3], 36(%[tmp_a]) \n\t"
+ "swc1 %[f4], 64(%[tmp_a]) \n\t"
+ "swc1 %[f5], 68(%[tmp_a]) \n\t"
+ "swc1 %[f6], 96(%[tmp_a]) \n\t"
+ "swc1 %[f7], 100(%[tmp_a]) \n\t"
+ "bgtz %[count], 1b \n\t"
+ " addiu %[tmp_a], %[tmp_a], 8 \n\t"
+ ".set pop \n\t"
+ : [f0] "=&f" (f0), [f1] "=&f" (f1), [f2] "=&f" (f2), [f3] "=&f" (f3),
+ [f4] "=&f" (f4), [f5] "=&f" (f5), [f6] "=&f" (f6), [f7] "=&f" (f7),
+ [x0r] "=&f" (x0r), [x0i] "=&f" (x0i), [x1r] "=&f" (x1r),
+ [x1i] "=&f" (x1i), [x2r] "=&f" (x2r), [x2i] "=&f" (x2i),
+ [x3r] "=&f" (x3r), [x3i] "=&f" (x3i), [tmp_a] "=&r" (tmp_a),
+ [count] "=&r" (count)
+ : [a] "r" (a)
+ : "memory"
+ );
+ wk2r = rdft_w[2];
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[tmp_a], %[a], 128 \n\t"
+ "addiu %[count], $zero, 4 \n\t"
+ "1: \n\t"
+ "addiu %[count], %[count], -1 \n\t"
+ "lwc1 %[f0], 0(%[tmp_a]) \n\t"
+ "lwc1 %[f1], 4(%[tmp_a]) \n\t"
+ "lwc1 %[f2], 32(%[tmp_a]) \n\t"
+ "lwc1 %[f3], 36(%[tmp_a]) \n\t"
+ "lwc1 %[f4], 64(%[tmp_a]) \n\t"
+ "lwc1 %[f5], 68(%[tmp_a]) \n\t"
+ "lwc1 %[f6], 96(%[tmp_a]) \n\t"
+ "lwc1 %[f7], 100(%[tmp_a]) \n\t"
+ "sub.s %[x1r], %[f0], %[f2] \n\t"
+ "sub.s %[x1i], %[f1], %[f3] \n\t"
+ "sub.s %[x3r], %[f4], %[f6] \n\t"
+ "sub.s %[x3i], %[f5], %[f7] \n\t"
+ "add.s %[x0r], %[f0], %[f2] \n\t"
+ "add.s %[x0i], %[f1], %[f3] \n\t"
+ "add.s %[x2r], %[f4], %[f6] \n\t"
+ "add.s %[x2i], %[f5], %[f7] \n\t"
+ "sub.s %[f0], %[x1r], %[x3i] \n\t"
+ "add.s %[f1], %[x1i], %[x3r] \n\t"
+ "sub.s %[f2], %[x3r], %[x1i] \n\t"
+ "add.s %[f3], %[x3i], %[x1r] \n\t"
+ "add.s %[f4], %[x0r], %[x2r] \n\t"
+ "add.s %[f5], %[x0i], %[x2i] \n\t"
+ "sub.s %[f6], %[f0], %[f1] \n\t"
+ "add.s %[f0], %[f0], %[f1] \n\t"
+ "sub.s %[f7], %[f2], %[f3] \n\t"
+ "add.s %[f2], %[f2], %[f3] \n\t"
+ "sub.s %[f1], %[x2i], %[x0i] \n\t"
+ "mul.s %[f6], %[f6], %[wk2r] \n\t"
+ "mul.s %[f0], %[f0], %[wk2r] \n\t"
+ "sub.s %[f3], %[x0r], %[x2r] \n\t"
+ "mul.s %[f7], %[f7], %[wk2r] \n\t"
+ "mul.s %[f2], %[f2], %[wk2r] \n\t"
+ "swc1 %[f4], 0(%[tmp_a]) \n\t"
+ "swc1 %[f5], 4(%[tmp_a]) \n\t"
+ "swc1 %[f6], 32(%[tmp_a]) \n\t"
+ "swc1 %[f0], 36(%[tmp_a]) \n\t"
+ "swc1 %[f1], 64(%[tmp_a]) \n\t"
+ "swc1 %[f3], 68(%[tmp_a]) \n\t"
+ "swc1 %[f7], 96(%[tmp_a]) \n\t"
+ "swc1 %[f2], 100(%[tmp_a]) \n\t"
+ "bgtz %[count], 1b \n\t"
+ " addiu %[tmp_a], %[tmp_a], 8 \n\t"
+ ".set pop \n\t"
+ : [f0] "=&f" (f0), [f1] "=&f" (f1), [f2] "=&f" (f2), [f3] "=&f" (f3),
+ [f4] "=&f" (f4), [f5] "=&f" (f5), [f6] "=&f" (f6), [f7] "=&f" (f7),
+ [x0r] "=&f" (x0r), [x0i] "=&f" (x0i), [x1r] "=&f" (x1r),
+ [x1i] "=&f" (x1i), [x2r] "=&f" (x2r), [x2i] "=&f" (x2i),
+ [x3r] "=&f" (x3r), [x3i] "=&f" (x3i), [tmp_a] "=&r" (tmp_a),
+ [count] "=&r" (count)
+ : [a] "r" (a), [wk2r] "f" (wk2r)
+ : "memory"
+ );
+ wk2i = rdft_w[3];
+ wk1r = rdft_w[4];
+ wk1i = rdft_w[5];
+ wk3r = rdft_wk3ri_first[2];
+ wk3i = rdft_wk3ri_first[3];
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[tmp_a], %[a], 256 \n\t"
+ "addiu %[count], $zero, 4 \n\t"
+ "1: \n\t"
+ "addiu %[count], %[count], -1 \n\t"
+ "lwc1 %[f0], 0(%[tmp_a]) \n\t"
+ "lwc1 %[f1], 4(%[tmp_a]) \n\t"
+ "lwc1 %[f2], 32(%[tmp_a]) \n\t"
+ "lwc1 %[f3], 36(%[tmp_a]) \n\t"
+ "lwc1 %[f4], 64(%[tmp_a]) \n\t"
+ "lwc1 %[f5], 68(%[tmp_a]) \n\t"
+ "lwc1 %[f6], 96(%[tmp_a]) \n\t"
+ "lwc1 %[f7], 100(%[tmp_a]) \n\t"
+ "add.s %[x0r], %[f0], %[f2] \n\t"
+ "add.s %[x2r], %[f4], %[f6] \n\t"
+ "add.s %[x0i], %[f1], %[f3] \n\t"
+ "add.s %[x2i], %[f5], %[f7] \n\t"
+ "sub.s %[x1r], %[f0], %[f2] \n\t"
+ "sub.s %[x1i], %[f1], %[f3] \n\t"
+ "sub.s %[x3r], %[f4], %[f6] \n\t"
+ "sub.s %[x3i], %[f5], %[f7] \n\t"
+ "sub.s %[f0], %[x0r], %[x2r] \n\t"
+ "sub.s %[f1], %[x0i], %[x2i] \n\t"
+ "add.s %[f2], %[x1i], %[x3r] \n\t"
+ "sub.s %[f3], %[x1r], %[x3i] \n\t"
+ "add.s %[f4], %[x1r], %[x3i] \n\t"
+ "sub.s %[f5], %[x1i], %[x3r] \n\t"
+ "mul.s %[x3r], %[wk2r], %[f0] \n\t"
+ "mul.s %[x3i], %[wk2r], %[f1] \n\t"
+ "mul.s %[x1r], %[wk1r], %[f3] \n\t"
+ "mul.s %[x1i], %[wk1r], %[f2] \n\t"
+ "mul.s %[f6], %[wk3r], %[f4] \n\t"
+ "mul.s %[f7], %[wk3r], %[f5] \n\t"
+#if !defined(MIPS32_R2_LE)
+ "mul.s %[f1], %[wk2i], %[f1] \n\t"
+ "mul.s %[f0], %[wk2i], %[f0] \n\t"
+ "sub.s %[x3r], %[x3r], %[f1] \n\t"
+ "add.s %[x3i], %[x3i], %[f0] \n\t"
+ "add.s %[f0], %[x0r], %[x2r] \n\t"
+ "add.s %[f1], %[x0i], %[x2i] \n\t"
+ "mul.s %[f2], %[wk1i], %[f2] \n\t"
+ "mul.s %[f3], %[wk1i], %[f3] \n\t"
+ "mul.s %[f5], %[wk3i], %[f5] \n\t"
+ "mul.s %[f4], %[wk3i], %[f4] \n\t"
+ "sub.s %[x1r], %[x1r], %[f2] \n\t"
+ "add.s %[x1i], %[x1i], %[f3] \n\t"
+ "sub.s %[f6], %[f6], %[f5] \n\t"
+ "add.s %[f7], %[f7], %[f4] \n\t"
+#else // #if !defined(MIPS32_R2_LE)
+ "nmsub.s %[x3r], %[x3r], %[wk2i], %[f1] \n\t"
+ "madd.s %[x3i], %[x3i], %[wk2i], %[f0] \n\t"
+ "add.s %[f0], %[x0r], %[x2r] \n\t"
+ "add.s %[f1], %[x0i], %[x2i] \n\t"
+ "nmsub.s %[x1r], %[x1r], %[wk1i], %[f2] \n\t"
+ "madd.s %[x1i], %[x1i], %[wk1i], %[f3] \n\t"
+ "nmsub.s %[f6], %[f6], %[wk3i], %[f5] \n\t"
+ "madd.s %[f7], %[f7], %[wk3i], %[f4] \n\t"
+#endif // #if !defined(MIPS32_R2_LE)
+ "swc1 %[f0], 0(%[tmp_a]) \n\t"
+ "swc1 %[f1], 4(%[tmp_a]) \n\t"
+ "swc1 %[x1r], 32(%[tmp_a]) \n\t"
+ "swc1 %[x1i], 36(%[tmp_a]) \n\t"
+ "swc1 %[x3r], 64(%[tmp_a]) \n\t"
+ "swc1 %[x3i], 68(%[tmp_a]) \n\t"
+ "swc1 %[f6], 96(%[tmp_a]) \n\t"
+ "swc1 %[f7], 100(%[tmp_a]) \n\t"
+ "bgtz %[count], 1b \n\t"
+ " addiu %[tmp_a], %[tmp_a], 8 \n\t"
+ ".set pop \n\t"
+ : [f0] "=&f" (f0), [f1] "=&f" (f1), [f2] "=&f" (f2), [f3] "=&f" (f3),
+ [f4] "=&f" (f4), [f5] "=&f" (f5), [f6] "=&f" (f6), [f7] "=&f" (f7),
+ [x0r] "=&f" (x0r), [x0i] "=&f" (x0i), [x1r] "=&f" (x1r),
+ [x1i] "=&f" (x1i), [x2r] "=&f" (x2r), [x2i] "=&f" (x2i),
+ [x3r] "=&f" (x3r), [x3i] "=&f" (x3i), [tmp_a] "=&r" (tmp_a),
+ [count] "=&r" (count)
+ : [a] "r" (a), [wk1r] "f" (wk1r), [wk1i] "f" (wk1i), [wk2r] "f" (wk2r),
+ [wk2i] "f" (wk2i), [wk3r] "f" (wk3r), [wk3i] "f" (wk3i)
+ : "memory"
+ );
+
+ wk1r = rdft_w[6];
+ wk1i = rdft_w[7];
+ wk3r = rdft_wk3ri_second[2];
+ wk3i = rdft_wk3ri_second[3];
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[tmp_a], %[a], 384 \n\t"
+ "addiu %[count], $zero, 4 \n\t"
+ "1: \n\t"
+ "addiu %[count], %[count], -1 \n\t"
+ "lwc1 %[f0], 0(%[tmp_a]) \n\t"
+ "lwc1 %[f1], 4(%[tmp_a]) \n\t"
+ "lwc1 %[f2], 32(%[tmp_a]) \n\t"
+ "lwc1 %[f3], 36(%[tmp_a]) \n\t"
+ "lwc1 %[f4], 64(%[tmp_a]) \n\t"
+ "lwc1 %[f5], 68(%[tmp_a]) \n\t"
+ "lwc1 %[f6], 96(%[tmp_a]) \n\t"
+ "lwc1 %[f7], 100(%[tmp_a]) \n\t"
+ "add.s %[x0r], %[f0], %[f2] \n\t"
+ "add.s %[x2r], %[f4], %[f6] \n\t"
+ "add.s %[x0i], %[f1], %[f3] \n\t"
+ "add.s %[x2i], %[f5], %[f7] \n\t"
+ "sub.s %[x1r], %[f0], %[f2] \n\t"
+ "sub.s %[x1i], %[f1], %[f3] \n\t"
+ "sub.s %[x3r], %[f4], %[f6] \n\t"
+ "sub.s %[x3i], %[f5], %[f7] \n\t"
+ "sub.s %[f0], %[x0r], %[x2r] \n\t"
+ "sub.s %[f1], %[x0i], %[x2i] \n\t"
+ "add.s %[f2], %[x1i], %[x3r] \n\t"
+ "sub.s %[f3], %[x1r], %[x3i] \n\t"
+ "add.s %[f4], %[x1r], %[x3i] \n\t"
+ "sub.s %[f5], %[x1i], %[x3r] \n\t"
+ "mul.s %[x3r], %[wk2i], %[f0] \n\t"
+ "mul.s %[x3i], %[wk2i], %[f1] \n\t"
+ "mul.s %[x1r], %[wk1r], %[f3] \n\t"
+ "mul.s %[x1i], %[wk1r], %[f2] \n\t"
+ "mul.s %[f6], %[wk3r], %[f4] \n\t"
+ "mul.s %[f7], %[wk3r], %[f5] \n\t"
+#if !defined(MIPS32_R2_LE)
+ "mul.s %[f1], %[wk2r], %[f1] \n\t"
+ "mul.s %[f0], %[wk2r], %[f0] \n\t"
+ "add.s %[x3r], %[x3r], %[f1] \n\t"
+ "neg.s %[x3r], %[x3r] \n\t"
+ "sub.s %[x3i], %[f0], %[x3i] \n\t"
+ "add.s %[f0], %[x0r], %[x2r] \n\t"
+ "add.s %[f1], %[x0i], %[x2i] \n\t"
+ "mul.s %[f2], %[wk1i], %[f2] \n\t"
+ "mul.s %[f3], %[wk1i], %[f3] \n\t"
+ "mul.s %[f5], %[wk3i], %[f5] \n\t"
+ "mul.s %[f4], %[wk3i], %[f4] \n\t"
+ "sub.s %[x1r], %[x1r], %[f2] \n\t"
+ "add.s %[x1i], %[x1i], %[f3] \n\t"
+ "sub.s %[f6], %[f6], %[f5] \n\t"
+ "add.s %[f7], %[f7], %[f4] \n\t"
+#else // #if !defined(MIPS32_R2_LE)
+ "nmadd.s %[x3r], %[x3r], %[wk2r], %[f1] \n\t"
+ "msub.s %[x3i], %[x3i], %[wk2r], %[f0] \n\t"
+ "add.s %[f0], %[x0r], %[x2r] \n\t"
+ "add.s %[f1], %[x0i], %[x2i] \n\t"
+ "nmsub.s %[x1r], %[x1r], %[wk1i], %[f2] \n\t"
+ "madd.s %[x1i], %[x1i], %[wk1i], %[f3] \n\t"
+ "nmsub.s %[f6], %[f6], %[wk3i], %[f5] \n\t"
+ "madd.s %[f7], %[f7], %[wk3i], %[f4] \n\t"
+#endif // #if !defined(MIPS32_R2_LE)
+ "swc1 %[f0], 0(%[tmp_a]) \n\t"
+ "swc1 %[f1], 4(%[tmp_a]) \n\t"
+ "swc1 %[x1r], 32(%[tmp_a]) \n\t"
+ "swc1 %[x1i], 36(%[tmp_a]) \n\t"
+ "swc1 %[x3r], 64(%[tmp_a]) \n\t"
+ "swc1 %[x3i], 68(%[tmp_a]) \n\t"
+ "swc1 %[f6], 96(%[tmp_a]) \n\t"
+ "swc1 %[f7], 100(%[tmp_a]) \n\t"
+ "bgtz %[count], 1b \n\t"
+ " addiu %[tmp_a], %[tmp_a], 8 \n\t"
+ ".set pop \n\t"
+ : [f0] "=&f" (f0), [f1] "=&f" (f1), [f2] "=&f" (f2), [f3] "=&f" (f3),
+ [f4] "=&f" (f4), [f5] "=&f" (f5), [f6] "=&f" (f6), [f7] "=&f" (f7),
+ [x0r] "=&f" (x0r), [x0i] "=&f" (x0i), [x1r] "=&f" (x1r),
+ [x1i] "=&f" (x1i), [x2r] "=&f" (x2r), [x2i] "=&f" (x2i),
+ [x3r] "=&f" (x3r), [x3i] "=&f" (x3i), [tmp_a] "=&r" (tmp_a),
+ [count] "=&r" (count)
+ : [a] "r" (a), [wk1r] "f" (wk1r), [wk1i] "f" (wk1i), [wk2r] "f" (wk2r),
+ [wk2i] "f" (wk2i), [wk3r] "f" (wk3r), [wk3i] "f" (wk3i)
+ : "memory"
+ );
+}
+
+static void cftfsub_128_mips(float *a) {
+ float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i;
+ float f0, f1, f2, f3, f4, f5, f6, f7;
+ int tmp_a, count;
+
+ cft1st_128(a);
+ cftmdl_128(a);
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[tmp_a], %[a], 0 \n\t"
+ "addiu %[count], $zero, 16 \n\t"
+ "1: \n\t"
+ "addiu %[count], %[count], -1 \n\t"
+ "lwc1 %[f0], 0(%[tmp_a]) \n\t"
+ "lwc1 %[f1], 4(%[tmp_a]) \n\t"
+ "lwc1 %[f2], 128(%[tmp_a]) \n\t"
+ "lwc1 %[f3], 132(%[tmp_a]) \n\t"
+ "lwc1 %[f4], 256(%[tmp_a]) \n\t"
+ "lwc1 %[f5], 260(%[tmp_a]) \n\t"
+ "lwc1 %[f6], 384(%[tmp_a]) \n\t"
+ "lwc1 %[f7], 388(%[tmp_a]) \n\t"
+ "add.s %[x0r], %[f0], %[f2] \n\t"
+ "add.s %[x0i], %[f1], %[f3] \n\t"
+ "add.s %[x2r], %[f4], %[f6] \n\t"
+ "add.s %[x2i], %[f5], %[f7] \n\t"
+ "sub.s %[x1r], %[f0], %[f2] \n\t"
+ "sub.s %[x1i], %[f1], %[f3] \n\t"
+ "sub.s %[x3r], %[f4], %[f6] \n\t"
+ "sub.s %[x3i], %[f5], %[f7] \n\t"
+ "add.s %[f0], %[x0r], %[x2r] \n\t"
+ "add.s %[f1], %[x0i], %[x2i] \n\t"
+ "sub.s %[f4], %[x0r], %[x2r] \n\t"
+ "sub.s %[f5], %[x0i], %[x2i] \n\t"
+ "sub.s %[f2], %[x1r], %[x3i] \n\t"
+ "add.s %[f3], %[x1i], %[x3r] \n\t"
+ "add.s %[f6], %[x1r], %[x3i] \n\t"
+ "sub.s %[f7], %[x1i], %[x3r] \n\t"
+ "swc1 %[f0], 0(%[tmp_a]) \n\t"
+ "swc1 %[f1], 4(%[tmp_a]) \n\t"
+ "swc1 %[f2], 128(%[tmp_a]) \n\t"
+ "swc1 %[f3], 132(%[tmp_a]) \n\t"
+ "swc1 %[f4], 256(%[tmp_a]) \n\t"
+ "swc1 %[f5], 260(%[tmp_a]) \n\t"
+ "swc1 %[f6], 384(%[tmp_a]) \n\t"
+ "swc1 %[f7], 388(%[tmp_a]) \n\t"
+ "bgtz %[count], 1b \n\t"
+ " addiu %[tmp_a], %[tmp_a], 8 \n\t"
+ ".set pop \n\t"
+ : [f0] "=&f" (f0), [f1] "=&f" (f1), [f2] "=&f" (f2), [f3] "=&f" (f3),
+ [f4] "=&f" (f4), [f5] "=&f" (f5), [f6] "=&f" (f6), [f7] "=&f" (f7),
+ [x0r] "=&f" (x0r), [x0i] "=&f" (x0i), [x1r] "=&f" (x1r),
+ [x1i] "=&f" (x1i), [x2r] "=&f" (x2r), [x2i] "=&f" (x2i),
+ [x3r] "=&f" (x3r), [x3i] "=&f" (x3i), [tmp_a] "=&r" (tmp_a),
+ [count] "=&r" (count)
+ : [a] "r" (a)
+ : "memory"
+ );
+}
+
+static void cftbsub_128_mips(float *a) {
+ float f0, f1, f2, f3, f4, f5, f6, f7;
+ float f8, f9, f10, f11, f12, f13, f14, f15;
+ float f16, f17, f18, f19, f20, f21, f22, f23;
+ int tmp_a, count;
+
+ cft1st_128(a);
+ cftmdl_128(a);
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "addiu %[tmp_a], %[a], 0 \n\t"
+ "addiu %[count], $zero, 8 \n\t"
+ "1: \n\t"
+ "addiu %[count], %[count], -1 \n\t"
+ "lwc1 %[f0], 0(%[tmp_a]) \n\t"
+ "lwc1 %[f1], 4(%[tmp_a]) \n\t"
+ "lwc1 %[f2], 128(%[tmp_a]) \n\t"
+ "lwc1 %[f3], 132(%[tmp_a]) \n\t"
+ "lwc1 %[f4], 256(%[tmp_a]) \n\t"
+ "lwc1 %[f5], 260(%[tmp_a]) \n\t"
+ "lwc1 %[f6], 384(%[tmp_a]) \n\t"
+ "lwc1 %[f7], 388(%[tmp_a]) \n\t"
+ "lwc1 %[f8], 8(%[tmp_a]) \n\t"
+ "lwc1 %[f9], 12(%[tmp_a]) \n\t"
+ "lwc1 %[f10], 136(%[tmp_a]) \n\t"
+ "lwc1 %[f11], 140(%[tmp_a]) \n\t"
+ "lwc1 %[f12], 264(%[tmp_a]) \n\t"
+ "lwc1 %[f13], 268(%[tmp_a]) \n\t"
+ "lwc1 %[f14], 392(%[tmp_a]) \n\t"
+ "lwc1 %[f15], 396(%[tmp_a]) \n\t"
+ "add.s %[f16], %[f0], %[f2] \n\t"
+ "add.s %[f17], %[f1], %[f3] \n\t"
+ "add.s %[f18], %[f4], %[f6] \n\t"
+ "add.s %[f19], %[f5], %[f7] \n\t"
+ "sub.s %[f20], %[f0], %[f2] \n\t"
+ "sub.s %[f21], %[f3], %[f1] \n\t"
+ "sub.s %[f22], %[f4], %[f6] \n\t"
+ "sub.s %[f23], %[f5], %[f7] \n\t"
+ "add.s %[f0], %[f8], %[f10] \n\t"
+ "add.s %[f1], %[f9], %[f11] \n\t"
+ "add.s %[f2], %[f12], %[f14] \n\t"
+ "add.s %[f3], %[f13], %[f15] \n\t"
+ "sub.s %[f4], %[f8], %[f10] \n\t"
+ "sub.s %[f5], %[f11], %[f9] \n\t"
+ "sub.s %[f6], %[f12], %[f14] \n\t"
+ "sub.s %[f7], %[f13], %[f15] \n\t"
+ "add.s %[f8], %[f16], %[f18] \n\t"
+ "add.s %[f9], %[f17], %[f19] \n\t"
+ "sub.s %[f12], %[f16], %[f18] \n\t"
+ "sub.s %[f13], %[f19], %[f17] \n\t"
+ "sub.s %[f10], %[f20], %[f23] \n\t"
+ "sub.s %[f11], %[f21], %[f22] \n\t"
+ "add.s %[f14], %[f20], %[f23] \n\t"
+ "add.s %[f15], %[f21], %[f22] \n\t"
+ "neg.s %[f9], %[f9] \n\t"
+ "add.s %[f16], %[f0], %[f2] \n\t"
+ "add.s %[f17], %[f1], %[f3] \n\t"
+ "sub.s %[f20], %[f0], %[f2] \n\t"
+ "sub.s %[f21], %[f3], %[f1] \n\t"
+ "sub.s %[f18], %[f4], %[f7] \n\t"
+ "sub.s %[f19], %[f5], %[f6] \n\t"
+ "add.s %[f22], %[f4], %[f7] \n\t"
+ "add.s %[f23], %[f5], %[f6] \n\t"
+ "neg.s %[f17], %[f17] \n\t"
+ "swc1 %[f8], 0(%[tmp_a]) \n\t"
+ "swc1 %[f10], 128(%[tmp_a]) \n\t"
+ "swc1 %[f11], 132(%[tmp_a]) \n\t"
+ "swc1 %[f12], 256(%[tmp_a]) \n\t"
+ "swc1 %[f13], 260(%[tmp_a]) \n\t"
+ "swc1 %[f14], 384(%[tmp_a]) \n\t"
+ "swc1 %[f15], 388(%[tmp_a]) \n\t"
+ "swc1 %[f9], 4(%[tmp_a]) \n\t"
+ "swc1 %[f16], 8(%[tmp_a]) \n\t"
+ "swc1 %[f18], 136(%[tmp_a]) \n\t"
+ "swc1 %[f19], 140(%[tmp_a]) \n\t"
+ "swc1 %[f20], 264(%[tmp_a]) \n\t"
+ "swc1 %[f21], 268(%[tmp_a]) \n\t"
+ "swc1 %[f22], 392(%[tmp_a]) \n\t"
+ "swc1 %[f23], 396(%[tmp_a]) \n\t"
+ "swc1 %[f17], 12(%[tmp_a]) \n\t"
+ "bgtz %[count], 1b \n\t"
+ " addiu %[tmp_a], %[tmp_a], 16 \n\t"
+ ".set pop \n\t"
+ : [f0] "=&f" (f0), [f1] "=&f" (f1), [f2] "=&f" (f2), [f3] "=&f" (f3),
+ [f4] "=&f" (f4), [f5] "=&f" (f5), [f6] "=&f" (f6), [f7] "=&f" (f7),
+ [f8] "=&f" (f8), [f9] "=&f" (f9), [f10] "=&f" (f10), [f11] "=&f" (f11),
+ [f12] "=&f" (f12), [f13] "=&f" (f13), [f14] "=&f" (f14),
+ [f15] "=&f" (f15), [f16] "=&f" (f16), [f17] "=&f" (f17),
+ [f18] "=&f" (f18), [f19] "=&f" (f19), [f20] "=&f" (f20),
+ [f21] "=&f" (f21), [f22] "=&f" (f22), [f23] "=&f" (f23),
+ [tmp_a] "=&r" (tmp_a), [count] "=&r" (count)
+ : [a] "r" (a)
+ : "memory"
+ );
+}
+
+static void rftfsub_128_mips(float *a) {
+ const float *c = rdft_w + 32;
+ float wkr, wki, xr, xi, yr, yi;
+ const float temp = 0.5f;
+ float aj20 = 0, aj21 = 0, ak20 = 0, ak21 = 0, ck1 = 0;
+ float *a1 = a;
+ float *a2 = a;
+ float *c1 = rdft_w + 33;
+ float *c2 = c1 + 30;
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "lwc1 %[aj20], 8(%[a2]) \n\t"
+ "lwc1 %[ak20], 504(%[a1]) \n\t"
+ "lwc1 %[ck1], 0(%[c2]) \n\t"
+ "lwc1 %[aj21], 12(%[a2]) \n\t"
+ "lwc1 %[ak21], 508(%[a1]) \n\t"
+ "sub.s %[wkr], %[temp], %[ck1] \n\t"
+ "sub.s %[xr], %[aj20], %[ak20] \n\t"
+ "add.s %[xi], %[aj21], %[ak21] \n\t"
+ "lwc1 %[wki], 0(%[c1]) \n\t"
+ "addiu %[c2], %[c2],-4 \n\t"
+ "mul.s %[yr], %[wkr], %[xr] \n\t"
+ "mul.s %[yi], %[wkr], %[xi] \n\t"
+#if !defined(MIPS32_R2_LE)
+ "mul.s %[xi], %[wki], %[xi] \n\t"
+ "mul.s %[xr], %[wki], %[xr] \n\t"
+ "sub.s %[yr], %[yr], %[xi] \n\t"
+ "add.s %[yi], %[yi], %[xr] \n\t"
+#else // #if !defined(MIPS32_R2_LE)
+ "nmsub.s %[yr], %[yr], %[wki], %[xi] \n\t"
+ "madd.s %[yi], %[yi], %[wki], %[xr] \n\t"
+#endif // #if !defined(MIPS32_R2_LE)
+ "addiu %[c1], %[c1], 4 \n\t"
+ "sub.s %[aj20], %[aj20], %[yr] \n\t"
+ "sub.s %[aj21], %[aj21], %[yi] \n\t"
+ "add.s %[ak20], %[ak20], %[yr] \n\t"
+ "sub.s %[ak21], %[ak21], %[yi] \n\t"
+ "addiu %[a2], %[a2], 8 \n\t"
+ "swc1 %[aj20], 0(%[a2]) \n\t"
+ "swc1 %[aj21], 4(%[a2]) \n\t"
+ "swc1 %[ak20], 504(%[a1]) \n\t"
+ "swc1 %[ak21], 508(%[a1]) \n\t"
+ "addiu %[a1], %[a1], -8 \n\t"
+ // 15x2 passes (two butterflies per iteration):
+ "1: \n\t"
+ "lwc1 %[ck1], 0(%[c2]) \n\t"
+ "lwc1 %[aj20], 8(%[a2]) \n\t"
+ "lwc1 %[aj21], 12(%[a2]) \n\t"
+ "lwc1 %[ak20], 504(%[a1]) \n\t"
+ "lwc1 %[ak21], 508(%[a1]) \n\t"
+ "lwc1 $f0, -4(%[c2]) \n\t"
+ "lwc1 $f2, 16(%[a2]) \n\t"
+ "lwc1 $f3, 20(%[a2]) \n\t"
+ "lwc1 $f8, 496(%[a1]) \n\t"
+ "lwc1 $f7, 500(%[a1]) \n\t"
+ "sub.s %[wkr], %[temp], %[ck1] \n\t"
+ "sub.s %[xr], %[aj20], %[ak20] \n\t"
+ "add.s %[xi], %[aj21], %[ak21] \n\t"
+ "lwc1 %[wki], 0(%[c1]) \n\t"
+ "sub.s $f0, %[temp], $f0 \n\t"
+ "sub.s $f6, $f2, $f8 \n\t"
+ "add.s $f4, $f3, $f7 \n\t"
+ "lwc1 $f5, 4(%[c1]) \n\t"
+ "mul.s %[yr], %[wkr], %[xr] \n\t"
+ "mul.s %[yi], %[wkr], %[xi] \n\t"
+ "mul.s $f1, $f0, $f6 \n\t"
+ "mul.s $f0, $f0, $f4 \n\t"
+ "addiu %[c2], %[c2], -8 \n\t"
+#if !defined(MIPS32_R2_LE)
+ "mul.s %[xi], %[wki], %[xi] \n\t"
+ "mul.s %[xr], %[wki], %[xr] \n\t"
+ "mul.s $f4, $f5, $f4 \n\t"
+ "mul.s $f6, $f5, $f6 \n\t"
+ "sub.s %[yr], %[yr], %[xi] \n\t"
+ "add.s %[yi], %[yi], %[xr] \n\t"
+ "sub.s $f1, $f1, $f4 \n\t"
+ "add.s $f0, $f0, $f6 \n\t"
+#else // #if !defined(MIPS32_R2_LE)
+ "nmsub.s %[yr], %[yr], %[wki], %[xi] \n\t"
+ "madd.s %[yi], %[yi], %[wki], %[xr] \n\t"
+ "nmsub.s $f1, $f1, $f5, $f4 \n\t"
+ "madd.s $f0, $f0, $f5, $f6 \n\t"
+#endif // #if !defined(MIPS32_R2_LE)
+ "addiu %[c1], %[c1], 8 \n\t"
+ "sub.s %[aj20], %[aj20], %[yr] \n\t"
+ "sub.s %[aj21], %[aj21], %[yi] \n\t"
+ "add.s %[ak20], %[ak20], %[yr] \n\t"
+ "sub.s %[ak21], %[ak21], %[yi] \n\t"
+ "sub.s $f2, $f2, $f1 \n\t"
+ "sub.s $f3, $f3, $f0 \n\t"
+ "add.s $f1, $f8, $f1 \n\t"
+ "sub.s $f0, $f7, $f0 \n\t"
+ "swc1 %[aj20], 8(%[a2]) \n\t"
+ "swc1 %[aj21], 12(%[a2]) \n\t"
+ "swc1 %[ak20], 504(%[a1]) \n\t"
+ "swc1 %[ak21], 508(%[a1]) \n\t"
+ "swc1 $f2, 16(%[a2]) \n\t"
+ "swc1 $f3, 20(%[a2]) \n\t"
+ "swc1 $f1, 496(%[a1]) \n\t"
+ "swc1 $f0, 500(%[a1]) \n\t"
+ "addiu %[a2], %[a2], 16 \n\t"
+ "bne %[c2], %[c], 1b \n\t"
+ " addiu %[a1], %[a1], -16 \n\t"
+ ".set pop \n\t"
+ : [a] "+r" (a), [c] "+r" (c), [a1] "+r" (a1), [a2] "+r" (a2),
+ [c1] "+r" (c1), [c2] "+r" (c2), [wkr] "=&f" (wkr), [wki] "=&f" (wki),
+ [xr] "=&f" (xr), [xi] "=&f" (xi), [yr] "=&f" (yr), [yi] "=&f" (yi),
+ [aj20] "=&f" (aj20), [aj21] "=&f" (aj21), [ak20] "=&f" (ak20),
+ [ak21] "=&f" (ak21), [ck1] "=&f" (ck1)
+ : [temp] "f" (temp)
+ : "memory", "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", "$f8"
+ );
+}
+
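To make the pointer choreography above legible: a2 walks the array forward from the front while a1 walks it backward from the end, and c1/c2 walk the twiddle table rdft_w from both ends until c2 meets c (hence the bne exit test). The routine is the standard Ooura real-FFT forward post-pass; its scalar reference, reconstructed from the generic C path, looks roughly like this (treat the indices as a sketch; the assembly handles one butterfly before its loop and then two per pass, 15 x 2 + 1 = 31 in all):

    extern float rdft_w[64];  // Twiddle table, declared in aec_rdft.h.

    static void rftfsub_128_sketch(float* a) {
      const float* c = rdft_w + 32;
      int j1, j2, k1, k2;
      float wkr, wki, xr, xi, yr, yi;
      for (j1 = 1, j2 = 2; j2 < 64; j1 += 1, j2 += 2) {
        k2 = 128 - j2;
        k1 = 32 - j1;
        wkr = 0.5f - c[k1];        // "sub.s wkr, temp, ck1" above.
        wki = c[j1];
        xr = a[j2 + 0] - a[k2 + 0];
        xi = a[j2 + 1] + a[k2 + 1];
        yr = wkr * xr - wki * xi;  // The mul.s + nmsub.s pair.
        yi = wkr * xi + wki * xr;  // The mul.s + madd.s pair.
        a[j2 + 0] -= yr;
        a[j2 + 1] -= yi;
        a[k2 + 0] += yr;
        a[k2 + 1] -= yi;
      }
    }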
+static void rftbsub_128_mips(float *a) {
+ const float *c = rdft_w + 32;
+ float wkr, wki, xr, xi, yr, yi;
+ a[1] = -a[1];
+ a[65] = -a[65];
+ const float temp = 0.5f;
+ float aj20 = 0, aj21 = 0, ak20 = 0, ak21 = 0, ck1 = 0;
+ float *a1 = a;
+ float *a2 = a;
+ float *c1 = rdft_w + 33;
+ float *c2 = c1 + 30;
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "lwc1 %[aj20], 8(%[a2]) \n\t"
+ "lwc1 %[ak20], 504(%[a1]) \n\t"
+ "lwc1 %[ck1], 0(%[c2]) \n\t"
+ "lwc1 %[aj21], 12(%[a2]) \n\t"
+ "lwc1 %[ak21], 508(%[a1]) \n\t"
+ "sub.s %[wkr], %[temp], %[ck1] \n\t"
+ "sub.s %[xr], %[aj20], %[ak20] \n\t"
+ "add.s %[xi], %[aj21], %[ak21] \n\t"
+ "lwc1 %[wki], 0(%[c1]) \n\t"
+ "addiu %[c2], %[c2], -4 \n\t"
+ "mul.s %[yr], %[wkr], %[xr] \n\t"
+ "mul.s %[yi], %[wkr], %[xi] \n\t"
+#if !defined(MIPS32_R2_LE)
+ "mul.s %[xi], %[wki], %[xi] \n\t"
+ "mul.s %[xr], %[wki], %[xr] \n\t"
+ "add.s %[yr], %[yr], %[xi] \n\t"
+ "sub.s %[yi], %[yi], %[xr] \n\t"
+#else // #if !defined(MIPS32_R2_LE)
+ "madd.s %[yr], %[yr], %[wki], %[xi] \n\t"
+ "nmsub.s %[yi], %[yi], %[wki], %[xr] \n\t"
+#endif // #if !defined(MIPS32_R2_LE)
+ "addiu %[c1], %[c1],4 \n\t"
+ "sub.s %[aj20], %[aj20], %[yr] \n\t"
+ "sub.s %[aj21], %[yi], %[aj21] \n\t"
+ "add.s %[ak20], %[ak20], %[yr] \n\t"
+ "sub.s %[ak21], %[yi], %[ak21] \n\t"
+ "addiu %[a2], %[a2], 8 \n\t"
+ "swc1 %[aj20], 0(%[a2]) \n\t"
+ "swc1 %[aj21], 4(%[a2]) \n\t"
+ "swc1 %[ak20], 504(%[a1]) \n\t"
+ "swc1 %[ak21], 508(%[a1]) \n\t"
+ "addiu %[a1], %[a1], -8 \n\t"
+ // 15x2 passes (two butterflies per iteration):
+ "1: \n\t"
+ "lwc1 %[ck1], 0(%[c2]) \n\t"
+ "lwc1 %[aj20], 8(%[a2]) \n\t"
+ "lwc1 %[aj21], 12(%[a2]) \n\t"
+ "lwc1 %[ak20], 504(%[a1]) \n\t"
+ "lwc1 %[ak21], 508(%[a1]) \n\t"
+ "lwc1 $f0, -4(%[c2]) \n\t"
+ "lwc1 $f2, 16(%[a2]) \n\t"
+ "lwc1 $f3, 20(%[a2]) \n\t"
+ "lwc1 $f8, 496(%[a1]) \n\t"
+ "lwc1 $f7, 500(%[a1]) \n\t"
+ "sub.s %[wkr], %[temp], %[ck1] \n\t"
+ "sub.s %[xr], %[aj20], %[ak20] \n\t"
+ "add.s %[xi], %[aj21], %[ak21] \n\t"
+ "lwc1 %[wki], 0(%[c1]) \n\t"
+ "sub.s $f0, %[temp], $f0 \n\t"
+ "sub.s $f6, $f2, $f8 \n\t"
+ "add.s $f4, $f3, $f7 \n\t"
+ "lwc1 $f5, 4(%[c1]) \n\t"
+ "mul.s %[yr], %[wkr], %[xr] \n\t"
+ "mul.s %[yi], %[wkr], %[xi] \n\t"
+ "mul.s $f1, $f0, $f6 \n\t"
+ "mul.s $f0, $f0, $f4 \n\t"
+ "addiu %[c2], %[c2], -8 \n\t"
+#if !defined(MIPS32_R2_LE)
+ "mul.s %[xi], %[wki], %[xi] \n\t"
+ "mul.s %[xr], %[wki], %[xr] \n\t"
+ "mul.s $f4, $f5, $f4 \n\t"
+ "mul.s $f6, $f5, $f6 \n\t"
+ "add.s %[yr], %[yr], %[xi] \n\t"
+ "sub.s %[yi], %[yi], %[xr] \n\t"
+ "add.s $f1, $f1, $f4 \n\t"
+ "sub.s $f0, $f0, $f6 \n\t"
+#else // #if !defined(MIPS32_R2_LE)
+ "madd.s %[yr], %[yr], %[wki], %[xi] \n\t"
+ "nmsub.s %[yi], %[yi], %[wki], %[xr] \n\t"
+ "madd.s $f1, $f1, $f5, $f4 \n\t"
+ "nmsub.s $f0, $f0, $f5, $f6 \n\t"
+#endif // #if !defined(MIPS32_R2_LE)
+ "addiu %[c1], %[c1], 8 \n\t"
+ "sub.s %[aj20], %[aj20], %[yr] \n\t"
+ "sub.s %[aj21], %[yi], %[aj21] \n\t"
+ "add.s %[ak20], %[ak20], %[yr] \n\t"
+ "sub.s %[ak21], %[yi], %[ak21] \n\t"
+ "sub.s $f2, $f2, $f1 \n\t"
+ "sub.s $f3, $f0, $f3 \n\t"
+ "add.s $f1, $f8, $f1 \n\t"
+ "sub.s $f0, $f0, $f7 \n\t"
+ "swc1 %[aj20], 8(%[a2]) \n\t"
+ "swc1 %[aj21], 12(%[a2]) \n\t"
+ "swc1 %[ak20], 504(%[a1]) \n\t"
+ "swc1 %[ak21], 508(%[a1]) \n\t"
+ "swc1 $f2, 16(%[a2]) \n\t"
+ "swc1 $f3, 20(%[a2]) \n\t"
+ "swc1 $f1, 496(%[a1]) \n\t"
+ "swc1 $f0, 500(%[a1]) \n\t"
+ "addiu %[a2], %[a2], 16 \n\t"
+ "bne %[c2], %[c], 1b \n\t"
+ " addiu %[a1], %[a1], -16 \n\t"
+ ".set pop \n\t"
+ : [a] "+r" (a), [c] "+r" (c), [a1] "+r" (a1), [a2] "+r" (a2),
+ [c1] "+r" (c1), [c2] "+r" (c2), [wkr] "=&f" (wkr), [wki] "=&f" (wki),
+ [xr] "=&f" (xr), [xi] "=&f" (xi), [yr] "=&f" (yr), [yi] "=&f" (yi),
+ [aj20] "=&f" (aj20), [aj21] "=&f" (aj21), [ak20] "=&f" (ak20),
+ [ak21] "=&f" (ak21), [ck1] "=&f" (ck1)
+ : [temp] "f" (temp)
+ : "memory", "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", "$f8"
+ );
+}
+
+void aec_rdft_init_mips(void) {
+ cft1st_128 = cft1st_128_mips;
+ cftmdl_128 = cftmdl_128_mips;
+ rftfsub_128 = rftfsub_128_mips;
+ rftbsub_128 = rftbsub_128_mips;
+ cftfsub_128 = cftfsub_128_mips;
+ cftbsub_128 = cftbsub_128_mips;
+ bitrv2_128 = bitrv2_128_mips;
+}
diff --git a/chromium/third_party/webrtc/modules/audio_processing/aec/echo_cancellation.c b/chromium/third_party/webrtc/modules/audio_processing/aec/echo_cancellation.c
index bbdd5f628b2..ba3b9243e19 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/aec/echo_cancellation.c
+++ b/chromium/third_party/webrtc/modules/audio_processing/aec/echo_cancellation.c
@@ -104,18 +104,18 @@ int webrtc_aec_instance_count = 0;
static void EstBufDelayNormal(aecpc_t* aecInst);
static void EstBufDelayExtended(aecpc_t* aecInst);
static int ProcessNormal(aecpc_t* self,
- const int16_t* near,
- const int16_t* near_high,
- int16_t* out,
- int16_t* out_high,
+ const float* near,
+ const float* near_high,
+ float* out,
+ float* out_high,
int16_t num_samples,
int16_t reported_delay_ms,
int32_t skew);
static void ProcessExtended(aecpc_t* self,
- const int16_t* near,
- const int16_t* near_high,
- int16_t* out,
- int16_t* out_high,
+ const float* near,
+ const float* near_high,
+ float* out,
+ float* out_high,
int16_t num_samples,
int16_t reported_delay_ms,
int32_t skew);
@@ -254,7 +254,7 @@ int32_t WebRtcAec_Init(void* aecInst, int32_t sampFreq, int32_t scSampFreq) {
aecpc->checkBuffSize = 1;
aecpc->firstVal = 0;
- aecpc->startup_phase = 1;
+ aecpc->startup_phase = WebRtcAec_reported_delay_enabled(aecpc->aec);
aecpc->bufSizeStart = 0;
aecpc->checkBufSizeCtr = 0;
aecpc->msInSndCardBuf = 0;
@@ -372,10 +372,10 @@ int32_t WebRtcAec_BufferFarend(void* aecInst,
}
int32_t WebRtcAec_Process(void* aecInst,
- const int16_t* nearend,
- const int16_t* nearendH,
- int16_t* out,
- int16_t* outH,
+ const float* nearend,
+ const float* nearendH,
+ float* out,
+ float* outH,
int16_t nrOfSamples,
int16_t msInSndCardBuf,
int32_t skew) {
@@ -632,10 +632,10 @@ AecCore* WebRtcAec_aec_core(void* handle) {
}
static int ProcessNormal(aecpc_t* aecpc,
- const int16_t* nearend,
- const int16_t* nearendH,
- int16_t* out,
- int16_t* outH,
+ const float* nearend,
+ const float* nearendH,
+ float* out,
+ float* outH,
int16_t nrOfSamples,
int16_t msInSndCardBuf,
int32_t skew) {
@@ -689,10 +689,10 @@ static int ProcessNormal(aecpc_t* aecpc,
if (aecpc->startup_phase) {
// Only needed if they don't already point to the same place.
if (nearend != out) {
- memcpy(out, nearend, sizeof(short) * nrOfSamples);
+ memcpy(out, nearend, sizeof(*out) * nrOfSamples);
}
if (nearendH != outH) {
- memcpy(outH, nearendH, sizeof(short) * nrOfSamples);
+ memcpy(outH, nearendH, sizeof(*outH) * nrOfSamples);
}
// The AEC is in the start-up mode
@@ -766,7 +766,9 @@ static int ProcessNormal(aecpc_t* aecpc,
}
} else {
// AEC is enabled.
- EstBufDelayNormal(aecpc);
+ if (WebRtcAec_reported_delay_enabled(aecpc->aec)) {
+ EstBufDelayNormal(aecpc);
+ }
// Note that 1 frame is supported for NB and 2 frames for WB.
for (i = 0; i < nFrames; i++) {
@@ -787,10 +789,10 @@ static int ProcessNormal(aecpc_t* aecpc,
}
static void ProcessExtended(aecpc_t* self,
- const int16_t* near,
- const int16_t* near_high,
- int16_t* out,
- int16_t* out_high,
+ const float* near,
+ const float* near_high,
+ float* out,
+ float* out_high,
int16_t num_samples,
int16_t reported_delay_ms,
int32_t skew) {
@@ -821,10 +823,10 @@ static void ProcessExtended(aecpc_t* self,
if (!self->farend_started) {
// Only needed if they don't already point to the same place.
if (near != out) {
- memcpy(out, near, sizeof(short) * num_samples);
+ memcpy(out, near, sizeof(*out) * num_samples);
}
if (near_high != out_high) {
- memcpy(out_high, near_high, sizeof(short) * num_samples);
+ memcpy(out_high, near_high, sizeof(*out_high) * num_samples);
}
return;
}
@@ -842,7 +844,9 @@ static void ProcessExtended(aecpc_t* self,
self->startup_phase = 0;
}
- EstBufDelayExtended(self);
+ if (WebRtcAec_reported_delay_enabled(self->aec)) {
+ EstBufDelayExtended(self);
+ }
{
// |delay_diff_offset| gives us the option to manually rewind the delay on
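A small hardening worth noting in the hunks above: the copies switch from sizeof(short) to sizeof(*out), so the byte count is derived from the destination's element type instead of being restated by hand. Had the old spelling survived the int16_t-to-float migration, each memcpy would have silently copied only half of each frame's bytes. The pattern, isolated (illustrative only, not project code):

    #include <string.h>

    // sizeof(*dst) follows the destination's element type, so the byte
    // count stays correct through a type migration; sizeof(short) would
    // now copy 2 of each sample's 4 bytes.
    static void CopyFrame(float* dst, const float* src, int num_samples) {
      if (src != dst)  // Only needed if they don't already point to the same place.
        memcpy(dst, src, sizeof(*dst) * num_samples);
    }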
diff --git a/chromium/third_party/webrtc/modules/audio_processing/aec/include/echo_cancellation.h b/chromium/third_party/webrtc/modules/audio_processing/aec/include/echo_cancellation.h
index 4c852cf64bf..dc64a345c3e 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/aec/include/echo_cancellation.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/aec/include/echo_cancellation.h
@@ -68,7 +68,7 @@ extern "C" {
*
* Inputs Description
* -------------------------------------------------------------------
- * void **aecInst Pointer to the AEC instance to be created
+ * void** aecInst Pointer to the AEC instance to be created
* and initialized
*
* Outputs Description
@@ -83,7 +83,7 @@ int32_t WebRtcAec_Create(void** aecInst);
*
* Inputs Description
* -------------------------------------------------------------------
- * void *aecInst Pointer to the AEC instance
+ * void* aecInst Pointer to the AEC instance
*
* Outputs Description
* -------------------------------------------------------------------
@@ -97,7 +97,7 @@ int32_t WebRtcAec_Free(void* aecInst);
*
* Inputs Description
* -------------------------------------------------------------------
- * void *aecInst Pointer to the AEC instance
+ * void* aecInst Pointer to the AEC instance
* int32_t sampFreq Sampling frequency of data
* int32_t scSampFreq Soundcard sampling frequency
*
@@ -113,8 +113,8 @@ int32_t WebRtcAec_Init(void* aecInst, int32_t sampFreq, int32_t scSampFreq);
*
* Inputs Description
* -------------------------------------------------------------------
- * void *aecInst Pointer to the AEC instance
- * int16_t *farend In buffer containing one frame of
+ * void* aecInst Pointer to the AEC instance
+ * int16_t* farend In buffer containing one frame of
* farend signal for L band
* int16_t nrOfSamples Number of samples in farend buffer
*
@@ -132,10 +132,10 @@ int32_t WebRtcAec_BufferFarend(void* aecInst,
*
* Inputs Description
* -------------------------------------------------------------------
- * void *aecInst Pointer to the AEC instance
- * int16_t *nearend In buffer containing one frame of
+ * void* aecInst Pointer to the AEC instance
+ * float* nearend In buffer containing one frame of
* nearend+echo signal for L band
- * int16_t *nearendH In buffer containing one frame of
+ * float* nearendH In buffer containing one frame of
* nearend+echo signal for H band
* int16_t nrOfSamples Number of samples in nearend buffer
* int16_t msInSndCardBuf Delay estimate for sound card and
@@ -146,18 +146,18 @@ int32_t WebRtcAec_BufferFarend(void* aecInst,
*
* Outputs Description
* -------------------------------------------------------------------
- * int16_t *out Out buffer, one frame of processed nearend
+ * float* out Out buffer, one frame of processed nearend
* for L band
- * int16_t *outH Out buffer, one frame of processed nearend
+ * float* outH Out buffer, one frame of processed nearend
* for H band
* int32_t return 0: OK
* -1: error
*/
int32_t WebRtcAec_Process(void* aecInst,
- const int16_t* nearend,
- const int16_t* nearendH,
- int16_t* out,
- int16_t* outH,
+ const float* nearend,
+ const float* nearendH,
+ float* out,
+ float* outH,
int16_t nrOfSamples,
int16_t msInSndCardBuf,
int32_t skew);
@@ -167,7 +167,7 @@ int32_t WebRtcAec_Process(void* aecInst,
*
* Inputs Description
* -------------------------------------------------------------------
- * void *handle Pointer to the AEC instance
+ * void* handle Pointer to the AEC instance
* AecConfig config Config instance that contains all
* properties to be set
*
@@ -183,11 +183,11 @@ int WebRtcAec_set_config(void* handle, AecConfig config);
*
* Inputs Description
* -------------------------------------------------------------------
- * void *handle Pointer to the AEC instance
+ * void* handle Pointer to the AEC instance
*
* Outputs Description
* -------------------------------------------------------------------
- * int *status 0: Almost certainly nearend single-talk
+ * int* status 0: Almost certainly nearend single-talk
 * 1: Might not be nearend single-talk
* int return 0: OK
* -1: error
@@ -199,11 +199,11 @@ int WebRtcAec_get_echo_status(void* handle, int* status);
*
* Inputs Description
* -------------------------------------------------------------------
- * void *handle Pointer to the AEC instance
+ * void* handle Pointer to the AEC instance
*
* Outputs Description
* -------------------------------------------------------------------
- * AecMetrics *metrics Struct which will be filled out with the
+ * AecMetrics* metrics Struct which will be filled out with the
* current echo metrics.
* int return 0: OK
* -1: error
@@ -232,7 +232,7 @@ int WebRtcAec_GetDelayMetrics(void* handle, int* median, int* std);
*
* Inputs Description
* -------------------------------------------------------------------
- * void *aecInst Pointer to the AEC instance
+ * void* aecInst Pointer to the AEC instance
*
* Outputs Description
* -------------------------------------------------------------------
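Taken together, the new signatures mean far-end audio is still delivered as int16_t while near-end input and processed output are now float. A minimal calling sequence against this header (a sketch only: every return value should be checked, 16 kHz single-band operation is assumed so the high-band pointers are NULL, and the 50 ms delay is a stand-in for a measured estimate):

    #include <stdint.h>

    #include "webrtc/modules/audio_processing/aec/include/echo_cancellation.h"

    void CancelEchoFrame(const int16_t* farend,  // 160 samples = 10 ms @ 16 kHz.
                         const float* nearend,
                         float* out) {
      void* aec = NULL;
      WebRtcAec_Create(&aec);
      WebRtcAec_Init(aec, 16000, 16000);         // sampFreq, scSampFreq.
      WebRtcAec_BufferFarend(aec, farend, 160);  // Far end stays int16_t.
      WebRtcAec_Process(aec, nearend, NULL,      // No H band below 32 kHz.
                        out, NULL,
                        160,                     // nrOfSamples: 80 or 160.
                        50,                      // msInSndCardBuf (placeholder).
                        0);                      // skew.
      WebRtcAec_Free(aec);
    }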
diff --git a/chromium/third_party/webrtc/modules/audio_processing/aec/system_delay_unittest.cc b/chromium/third_party/webrtc/modules/audio_processing/aec/system_delay_unittest.cc
index a19030ae350..a13d47622c9 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/aec/system_delay_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/aec/system_delay_unittest.cc
@@ -9,12 +9,12 @@
*/
#include "testing/gtest/include/gtest/gtest.h"
-
extern "C" {
#include "webrtc/modules/audio_processing/aec/aec_core.h"
}
#include "webrtc/modules/audio_processing/aec/echo_cancellation_internal.h"
#include "webrtc/modules/audio_processing/aec/include/echo_cancellation.h"
+#include "webrtc/test/testsupport/gtest_disable.h"
#include "webrtc/typedefs.h"
namespace {
@@ -46,16 +46,18 @@ class SystemDelayTest : public ::testing::Test {
aecpc_t* self_;
int samples_per_frame_;
// Dummy input/output speech data.
- int16_t far_[160];
- int16_t near_[160];
- int16_t out_[160];
+ static const int kSamplesPerChunk = 160;
+ int16_t far_[kSamplesPerChunk];
+ float near_[kSamplesPerChunk];
+ float out_[kSamplesPerChunk];
};
SystemDelayTest::SystemDelayTest()
: handle_(NULL), self_(NULL), samples_per_frame_(0) {
// Dummy input data are set with more or less arbitrary non-zero values.
memset(far_, 1, sizeof(far_));
- memset(near_, 2, sizeof(near_));
+ for (int i = 0; i < kSamplesPerChunk; i++)
+ near_[i] = 514.0;
memset(out_, 0, sizeof(out_));
}
@@ -251,6 +253,9 @@ TEST_F(SystemDelayTest, CorrectDelayAfterStableBufferBuildUp) {
// conditions, but with an empty internal far-end buffer. Once that is done we
// verify that the system delay is increased correctly until we have reached an
// internal buffer size of 75% of what's been reported.
+
+ // This test assumes the reported delays are used.
+ WebRtcAec_enable_reported_delay(WebRtcAec_aec_core(handle_), 1);
for (size_t i = 0; i < kNumSampleRates; i++) {
Init(kSampleRateHz[i]);
@@ -332,6 +337,9 @@ TEST_F(SystemDelayTest, CorrectDelayDuringDrift) {
// device buffer. The drift is simulated by decreasing the reported device
// buffer size by 1 ms every 100 ms. If the device buffer size goes below 30
// ms we jump (add) 10 ms to give a repeated pattern.
+
+ // This test assumes the reported delays are used.
+ WebRtcAec_enable_reported_delay(WebRtcAec_aec_core(handle_), 1);
for (size_t i = 0; i < kNumSampleRates; i++) {
Init(kSampleRateHz[i]);
RunStableStartup();
@@ -365,6 +373,9 @@ TEST_F(SystemDelayTest, ShouldRecoverAfterGlitch) {
// the device.
// The system is said to be in a non-causal state if the difference between
// the device buffer and system delay is less than a block (64 samples).
+
+ // This test assumes the reported delays are used.
+ WebRtcAec_enable_reported_delay(WebRtcAec_aec_core(handle_), 1);
for (size_t i = 0; i < kNumSampleRates; i++) {
Init(kSampleRateHz[i]);
RunStableStartup();
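The value 514.0 in the fixture above is presumably chosen to keep the test data bit-for-bit compatible with what the old memset produced: filling an int16_t with the byte 0x02 yields 0x0202 == 514 per sample. memset itself can no longer be used, because the same byte pattern read as a float is roughly 9.6e-38 rather than anything near 2:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    int main(void) {
      int16_t s;
      memset(&s, 2, sizeof(s));  // Every byte becomes 0x02.
      assert(s == 514);          // 0x0202 -- the old near_[] sample value.
      float f;
      memset(&f, 2, sizeof(f));  // 0x02020202 interpreted as a float...
      assert(f > 0.f && f < 1e-30f);  // ...is ~9.6e-38, useless as test data.
      return 0;
    }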
diff --git a/chromium/third_party/webrtc/modules/audio_processing/aecm/aecm_core.c b/chromium/third_party/webrtc/modules/audio_processing/aecm/aecm_core.c
index fc94f1b889e..0f34874612d 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/aecm/aecm_core.c
+++ b/chromium/third_party/webrtc/modules/audio_processing/aecm/aecm_core.c
@@ -266,6 +266,9 @@ int WebRtcAecm_CreateCore(AecmCore_t **aecmInst)
aecm = NULL;
return -1;
}
+ // TODO(bjornv): Robust delay validation is explicitly disabled until it has
+ // been verified that there is no performance regression. Then remove this line.
+ WebRtc_enable_robust_validation(aecm->delay_estimator, 0);
aecm->real_fft = WebRtcSpl_CreateRealFFT(PART_LEN_SHIFT);
if (aecm->real_fft == NULL) {
diff --git a/chromium/third_party/webrtc/modules/audio_processing/aecm/aecm_core_c.c b/chromium/third_party/webrtc/modules/audio_processing/aecm/aecm_core_c.c
index 63d4ac90280..f8491e97378 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/aecm/aecm_core_c.c
+++ b/chromium/third_party/webrtc/modules/audio_processing/aecm/aecm_core_c.c
@@ -260,7 +260,7 @@ static int TimeToFrequencyDomain(AecmCore_t* aecm,
__asm __volatile(
"smulbb %[tmp32no1], %[real], %[real]\n\t"
"smlabb %[tmp32no2], %[imag], %[imag], %[tmp32no1]\n\t"
- :[tmp32no1]"+r"(tmp32no1),
+ :[tmp32no1]"+&r"(tmp32no1),
[tmp32no2]"=r"(tmp32no2)
:[real]"r"(freq_signal[i].real),
[imag]"r"(freq_signal[i].imag)
diff --git a/chromium/third_party/webrtc/modules/audio_processing/aecm/echo_control_mobile.c b/chromium/third_party/webrtc/modules/audio_processing/aecm/echo_control_mobile.c
index b896de0a214..088bbf03f74 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/aecm/echo_control_mobile.c
+++ b/chromium/third_party/webrtc/modules/audio_processing/aecm/echo_control_mobile.c
@@ -443,27 +443,14 @@ int32_t WebRtcAecm_Process(void *aecmInst, const int16_t *nearendNoisy,
// Call the AECM
/*WebRtcAecm_ProcessFrame(aecm->aecmCore, farend, &nearend[FRAME_LEN * i],
&out[FRAME_LEN * i], aecm->knownDelay);*/
- if (nearendClean == NULL)
- {
- if (WebRtcAecm_ProcessFrame(aecm->aecmCore,
- farend_ptr,
- &nearendNoisy[FRAME_LEN * i],
- NULL,
- &out[FRAME_LEN * i]) == -1)
- {
- return -1;
- }
- } else
- {
- if (WebRtcAecm_ProcessFrame(aecm->aecmCore,
- farend_ptr,
- &nearendNoisy[FRAME_LEN * i],
- &nearendClean[FRAME_LEN * i],
- &out[FRAME_LEN * i]) == -1)
- {
- return -1;
- }
- }
+ if (WebRtcAecm_ProcessFrame(aecm->aecmCore,
+ farend_ptr,
+ &nearendNoisy[FRAME_LEN * i],
+ (nearendClean
+ ? &nearendClean[FRAME_LEN * i]
+ : NULL),
+ &out[FRAME_LEN * i]) == -1)
+ return -1;
}
}
diff --git a/chromium/third_party/webrtc/modules/audio_processing/aecm/include/echo_control_mobile.h b/chromium/third_party/webrtc/modules/audio_processing/aecm/include/echo_control_mobile.h
index 8ea2e87e2ff..ac43576dd26 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/aecm/include/echo_control_mobile.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/aecm/include/echo_control_mobile.h
@@ -45,7 +45,7 @@ extern "C" {
*
* Inputs Description
* -------------------------------------------------------------------
- * void **aecmInst Pointer to the AECM instance to be
+ * void** aecmInst Pointer to the AECM instance to be
* created and initialized
*
* Outputs Description
@@ -60,11 +60,11 @@ int32_t WebRtcAecm_Create(void **aecmInst);
*
* Inputs Description
* -------------------------------------------------------------------
- * void *aecmInst Pointer to the AECM instance
+ * void* aecmInst Pointer to the AECM instance
*
* Outputs Description
* -------------------------------------------------------------------
- * int32_t return 0: OK
+ * int32_t return 0: OK
* -1: error
*/
int32_t WebRtcAecm_Free(void *aecmInst);
@@ -74,7 +74,7 @@ int32_t WebRtcAecm_Free(void *aecmInst);
*
* Inputs Description
* -------------------------------------------------------------------
- * void *aecmInst Pointer to the AECM instance
+ * void* aecmInst Pointer to the AECM instance
* int32_t sampFreq Sampling frequency of data
*
* Outputs Description
@@ -89,8 +89,8 @@ int32_t WebRtcAecm_Init(void* aecmInst, int32_t sampFreq);
*
* Inputs Description
* -------------------------------------------------------------------
- * void *aecmInst Pointer to the AECM instance
- * int16_t *farend In buffer containing one frame of
+ * void* aecmInst Pointer to the AECM instance
+ * int16_t* farend In buffer containing one frame of
* farend signal
* int16_t nrOfSamples Number of samples in farend buffer
*
@@ -106,14 +106,14 @@ int32_t WebRtcAecm_BufferFarend(void* aecmInst,
/*
 * Runs the AECM on 80 or 160 sample blocks of data.
*
- * Inputs Description
+ * Inputs Description
* -------------------------------------------------------------------
- * void *aecmInst Pointer to the AECM instance
- * int16_t *nearendNoisy In buffer containing one frame of
+ * void* aecmInst Pointer to the AECM instance
+ * int16_t* nearendNoisy In buffer containing one frame of
* reference nearend+echo signal. If
* noise reduction is active, provide
* the noisy signal here.
- * int16_t *nearendClean In buffer containing one frame of
+ * int16_t* nearendClean In buffer containing one frame of
* nearend+echo signal. If noise
* reduction is active, provide the
* clean signal here. Otherwise pass a
@@ -122,11 +122,11 @@ int32_t WebRtcAecm_BufferFarend(void* aecmInst,
* int16_t msInSndCardBuf Delay estimate for sound card and
* system buffers
*
- * Outputs Description
+ * Outputs Description
* -------------------------------------------------------------------
- * int16_t *out Out buffer, one frame of processed nearend
- * int32_t return 0: OK
- * -1: error
+ * int16_t* out Out buffer, one frame of processed nearend
+ * int32_t return 0: OK
+ * -1: error
*/
int32_t WebRtcAecm_Process(void* aecmInst,
const int16_t* nearendNoisy,
@@ -140,8 +140,8 @@ int32_t WebRtcAecm_Process(void* aecmInst,
*
* Inputs Description
* -------------------------------------------------------------------
- * void *aecmInst Pointer to the AECM instance
- * AecmConfig config Config instance that contains all
+ * void* aecmInst Pointer to the AECM instance
+ * AecmConfig config Config instance that contains all
* properties to be set
*
* Outputs Description
@@ -156,11 +156,11 @@ int32_t WebRtcAecm_set_config(void* aecmInst, AecmConfig config);
*
* Inputs Description
* -------------------------------------------------------------------
- * void *aecmInst Pointer to the AECM instance
+ * void* aecmInst Pointer to the AECM instance
*
* Outputs Description
* -------------------------------------------------------------------
- * AecmConfig *config Pointer to the config instance that
+ * AecmConfig* config Pointer to the config instance that
* all properties will be written to
* int32_t return 0: OK
* -1: error
@@ -178,7 +178,7 @@ int32_t WebRtcAecm_get_config(void *aecmInst, AecmConfig *config);
*
* Outputs Description
* -------------------------------------------------------------------
- * int32_t return 0: OK
+ * int32_t return 0: OK
* -1: error
*/
int32_t WebRtcAecm_InitEchoPath(void* aecmInst,
@@ -197,7 +197,7 @@ int32_t WebRtcAecm_InitEchoPath(void* aecmInst,
*
* Outputs Description
* -------------------------------------------------------------------
- * int32_t return 0: OK
+ * int32_t return 0: OK
* -1: error
*/
int32_t WebRtcAecm_GetEchoPath(void* aecmInst,
@@ -209,7 +209,7 @@ int32_t WebRtcAecm_GetEchoPath(void* aecmInst,
*
* Outputs Description
* -------------------------------------------------------------------
- * size_t return : size in bytes
+ * size_t return Size in bytes
*/
size_t WebRtcAecm_echo_path_size_bytes();
@@ -218,7 +218,7 @@ size_t WebRtcAecm_echo_path_size_bytes();
*
* Inputs Description
* -------------------------------------------------------------------
- * void *aecmInst Pointer to the AECM instance
+ * void* aecmInst Pointer to the AECM instance
*
* Outputs Description
* -------------------------------------------------------------------
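Read end to end, the header above documents a create/init/buffer/process/free lifecycle. A hedged usage sketch, using only the entry points and frame sizes the comments describe (8 kHz and 80-sample frames are illustrative choices; error handling is abbreviated):

void RunAecmOnce(const int16_t* farend, const int16_t* nearend_noisy,
                 int16_t* out, int16_t delay_ms) {
  void* aecm = NULL;
  if (WebRtcAecm_Create(&aecm) != 0)
    return;
  if (WebRtcAecm_Init(aecm, 8000) == 0) {
    // One 80-sample farend frame, then the matching nearend frame. NULL for
    // nearendClean is the documented way to run without noise reduction.
    WebRtcAecm_BufferFarend(aecm, farend, 80);
    WebRtcAecm_Process(aecm, nearend_noisy, NULL, out, 80, delay_ms);
  }
  WebRtcAecm_Free(aecm);
}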
diff --git a/chromium/third_party/webrtc/modules/audio_processing/agc/analog_agc.c b/chromium/third_party/webrtc/modules/audio_processing/agc/analog_agc.c
index 1e8e3d86b2d..4f110cc2092 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/agc/analog_agc.c
+++ b/chromium/third_party/webrtc/modules/audio_processing/agc/analog_agc.c
@@ -822,10 +822,16 @@ int32_t WebRtcAgc_ProcessAnalog(void *state, int32_t inMicLevel,
if (inMicLevelTmp != stt->micVol)
{
- // Incoming level mismatch; update our level.
- // This could be the case if the volume is changed manually, or if the
- // sound device has a low volume resolution.
- stt->micVol = inMicLevelTmp;
+ if (inMicLevel == stt->lastInMicLevel) {
+ // We requested a volume adjustment, but it didn't occur. This is
+ // probably due to a coarse quantization of the volume slider.
+ // Restore the requested value to prevent getting stuck.
+ inMicLevelTmp = stt->micVol;
+ } else {
+ // As long as the value changed, update to match.
+ stt->micVol = inMicLevelTmp;
+ }
}
if (inMicLevelTmp > stt->maxLevel)
@@ -835,6 +841,7 @@ int32_t WebRtcAgc_ProcessAnalog(void *state, int32_t inMicLevel,
}
// Store last value here, after we've taken care of manual updates etc.
+ stt->lastInMicLevel = inMicLevel;
lastMicVol = stt->micVol;
/* Checks if the signal is saturated. Also a check if individual samples
@@ -1597,6 +1604,7 @@ int WebRtcAgc_Init(void *agcInst, int32_t minLevel, int32_t maxLevel,
stt->maxInit = stt->maxLevel;
stt->zeroCtrlMax = stt->maxAnalog;
+ stt->lastInMicLevel = 0;
/* Initialize micVol parameter */
stt->micVol = stt->maxAnalog;
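The hunk above distinguishes two reasons the reported level can disagree with the level the AGC asked for: the user (or OS) moved the slider, or the platform quantized our request away. A reduced sketch of that reconciliation (names are illustrative; 'requested' plays the role of stt->micVol and 'last_reported' of the new lastInMicLevel field):

#include <stdint.h>

int32_t ReconcileMicLevel(int32_t reported, int32_t requested,
                          int32_t last_reported) {
  if (reported == requested)
    return requested;  // The device applied our adjustment.
  if (reported == last_reported)
    return requested;  // Our adjustment was quantized away; keep asking.
  return reported;     // Someone else changed the volume; follow it.
}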
diff --git a/chromium/third_party/webrtc/modules/audio_processing/agc/analog_agc.h b/chromium/third_party/webrtc/modules/audio_processing/agc/analog_agc.h
index 16ea29c4961..b036f449c70 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/agc/analog_agc.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/agc/analog_agc.h
@@ -111,6 +111,7 @@ typedef struct
int32_t minLevel; // Minimum possible volume level
int32_t minOutput; // Minimum output volume level
int32_t zeroCtrlMax; // Remember max gain => don't amp low input
+ int32_t lastInMicLevel;
int16_t scale; // Scale factor for internal volume levels
#ifdef MIC_LEVEL_FEEDBACK
diff --git a/chromium/third_party/webrtc/modules/audio_processing/agc/digital_agc.c b/chromium/third_party/webrtc/modules/audio_processing/agc/digital_agc.c
index 00565dd7230..4b169c180eb 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/agc/digital_agc.c
+++ b/chromium/third_party/webrtc/modules/audio_processing/agc/digital_agc.c
@@ -118,7 +118,7 @@ int32_t WebRtcAgc_CalculateGainTable(int32_t *gainTable, // Q16
limiterLvlX = analogTarget - limiterOffset;
limiterIdx = 2
+ WebRtcSpl_DivW32W16ResW16(WEBRTC_SPL_LSHIFT_W32((int32_t)limiterLvlX, 13),
- WEBRTC_SPL_RSHIFT_U16(kLog10_2, 1));
+ (kLog10_2 / 2));
tmp16no1 = WebRtcSpl_DivW32W16ResW16(limiterOffset + (kCompRatio >> 1), kCompRatio);
limiterLvl = targetLevelDbfs + tmp16no1;
@@ -288,12 +288,7 @@ int32_t WebRtcAgc_InitDigital(DigitalAgc_t *stt, int16_t agcMode)
int32_t WebRtcAgc_AddFarendToDigital(DigitalAgc_t *stt, const int16_t *in_far,
int16_t nrSamples)
{
- // Check for valid pointer
- if (&stt->vadFarend == NULL)
- {
- return -1;
- }
-
+ assert(stt != NULL);
// VAD for far end
WebRtcAgc_ProcessVad(&stt->vadFarend, in_far, nrSamples);
@@ -778,7 +773,7 @@ int16_t WebRtcAgc_ProcessVad(AgcVad_t *state, // (i) VAD state
tmp16 = WEBRTC_SPL_LSHIFT_W16(3, 12);
tmp32 = WEBRTC_SPL_MUL_16_16(tmp16, (dB - state->meanLongTerm));
tmp32 = WebRtcSpl_DivW32W16(tmp32, state->stdLongTerm);
- tmpU16 = WEBRTC_SPL_LSHIFT_U16((uint16_t)13, 12);
+ tmpU16 = (13 << 12);
tmp32b = WEBRTC_SPL_MUL_16_U16(state->logRatio, tmpU16);
tmp32 += WEBRTC_SPL_RSHIFT_W32(tmp32b, 10);
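Both replacements above swap fixed-point shift macros for plain arithmetic on constants, which the compiler folds identically. A sanity sketch of the two equivalences, treating the kLog10_2-style constant as any 16-bit unsigned value:

#include <assert.h>
#include <stdint.h>

static void CheckShiftEquivalences(uint16_t q_constant) {
  // Right shift by one equals division by two for unsigned values...
  assert((uint16_t)(q_constant >> 1) == q_constant / 2);
  // ...and (13 << 12) is just 13 in Q12, i.e. 13 * 4096.
  assert((13 << 12) == 13 * 4096);
}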
diff --git a/chromium/third_party/webrtc/modules/audio_processing/audio_buffer.cc b/chromium/third_party/webrtc/modules/audio_processing/audio_buffer.cc
index 048d048723e..b0f1eb6c1db 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/audio_buffer.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/audio_buffer.cc
@@ -10,6 +10,8 @@
#include "webrtc/modules/audio_processing/audio_buffer.h"
+#include "webrtc/common_audio/include/audio_util.h"
+#include "webrtc/common_audio/resampler/push_sinc_resampler.h"
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
namespace webrtc {
@@ -21,145 +23,343 @@ enum {
kSamplesPer32kHzChannel = 320
};
-void StereoToMono(const int16_t* left, const int16_t* right,
- int16_t* out, int samples_per_channel) {
- assert(left != NULL && right != NULL && out != NULL);
- for (int i = 0; i < samples_per_channel; i++) {
- int32_t data32 = (static_cast<int32_t>(left[i]) +
- static_cast<int32_t>(right[i])) >> 1;
+bool HasKeyboardChannel(AudioProcessing::ChannelLayout layout) {
+ switch (layout) {
+ case AudioProcessing::kMono:
+ case AudioProcessing::kStereo:
+ return false;
+ case AudioProcessing::kMonoAndKeyboard:
+ case AudioProcessing::kStereoAndKeyboard:
+ return true;
+ }
+ assert(false);
+ return false;
+}
+
+int KeyboardChannelIndex(AudioProcessing::ChannelLayout layout) {
+ switch (layout) {
+ case AudioProcessing::kMono:
+ case AudioProcessing::kStereo:
+ assert(false);
+ return -1;
+ case AudioProcessing::kMonoAndKeyboard:
+ return 1;
+ case AudioProcessing::kStereoAndKeyboard:
+ return 2;
+ }
+ assert(false);
+ return -1;
+}
+
- out[i] = WebRtcSpl_SatW32ToW16(data32);
+void StereoToMono(const float* left, const float* right, float* out,
+ int samples_per_channel) {
+ for (int i = 0; i < samples_per_channel; ++i) {
+ out[i] = (left[i] + right[i]) / 2;
}
}
+
+void StereoToMono(const int16_t* left, const int16_t* right, int16_t* out,
+ int samples_per_channel) {
+ for (int i = 0; i < samples_per_channel; ++i) {
+ out[i] = (left[i] + right[i]) >> 1;
+ }
+}
+
} // namespace
-struct AudioChannel {
- AudioChannel() {
- memset(data, 0, sizeof(data));
+// One int16_t and one float ChannelBuffer that are kept in sync. The sync is
+// broken when someone requests write access to either ChannelBuffer, and
+// reestablished when someone requests the outdated ChannelBuffer. It is
+// therefore safe to use the return value of ibuf() and fbuf() until the next
+// call to the other method.
+class IFChannelBuffer {
+ public:
+ IFChannelBuffer(int samples_per_channel, int num_channels)
+ : ivalid_(true),
+ ibuf_(samples_per_channel, num_channels),
+ fvalid_(true),
+ fbuf_(samples_per_channel, num_channels) {}
+
+ ChannelBuffer<int16_t>* ibuf() {
+ RefreshI();
+ fvalid_ = false;
+ return &ibuf_;
+ }
+
+ ChannelBuffer<float>* fbuf() {
+ RefreshF();
+ ivalid_ = false;
+ return &fbuf_;
}
- int16_t data[kSamplesPer32kHzChannel];
+ private:
+ void RefreshF() {
+ if (!fvalid_) {
+ assert(ivalid_);
+ const int16_t* const int_data = ibuf_.data();
+ float* const float_data = fbuf_.data();
+ const int length = fbuf_.length();
+ for (int i = 0; i < length; ++i)
+ float_data[i] = int_data[i];
+ fvalid_ = true;
+ }
+ }
+
+ void RefreshI() {
+ if (!ivalid_) {
+ assert(fvalid_);
+ const float* const float_data = fbuf_.data();
+ int16_t* const int_data = ibuf_.data();
+ const int length = ibuf_.length();
+ for (int i = 0; i < length; ++i)
+ int_data[i] = WEBRTC_SPL_SAT(std::numeric_limits<int16_t>::max(),
+ float_data[i],
+ std::numeric_limits<int16_t>::min());
+ ivalid_ = true;
+ }
+ }
+
+ bool ivalid_;
+ ChannelBuffer<int16_t> ibuf_;
+ bool fvalid_;
+ ChannelBuffer<float> fbuf_;
};
-struct SplitAudioChannel {
- SplitAudioChannel() {
- memset(low_pass_data, 0, sizeof(low_pass_data));
- memset(high_pass_data, 0, sizeof(high_pass_data));
- memset(analysis_filter_state1, 0, sizeof(analysis_filter_state1));
- memset(analysis_filter_state2, 0, sizeof(analysis_filter_state2));
- memset(synthesis_filter_state1, 0, sizeof(synthesis_filter_state1));
- memset(synthesis_filter_state2, 0, sizeof(synthesis_filter_state2));
+class SplitChannelBuffer {
+ public:
+ SplitChannelBuffer(int samples_per_split_channel, int num_channels)
+ : low_(samples_per_split_channel, num_channels),
+ high_(samples_per_split_channel, num_channels) {
}
+ ~SplitChannelBuffer() {}
- int16_t low_pass_data[kSamplesPer16kHzChannel];
- int16_t high_pass_data[kSamplesPer16kHzChannel];
+ int16_t* low_channel(int i) { return low_.ibuf()->channel(i); }
+ int16_t* high_channel(int i) { return high_.ibuf()->channel(i); }
+ float* low_channel_f(int i) { return low_.fbuf()->channel(i); }
+ float* high_channel_f(int i) { return high_.fbuf()->channel(i); }
- int32_t analysis_filter_state1[6];
- int32_t analysis_filter_state2[6];
- int32_t synthesis_filter_state1[6];
- int32_t synthesis_filter_state2[6];
+ private:
+ IFChannelBuffer low_;
+ IFChannelBuffer high_;
};
-// TODO(andrew): check range of input parameters?
-AudioBuffer::AudioBuffer(int max_num_channels,
- int samples_per_channel)
- : max_num_channels_(max_num_channels),
- num_channels_(0),
+AudioBuffer::AudioBuffer(int input_samples_per_channel,
+ int num_input_channels,
+ int process_samples_per_channel,
+ int num_process_channels,
+ int output_samples_per_channel)
+ : input_samples_per_channel_(input_samples_per_channel),
+ num_input_channels_(num_input_channels),
+ proc_samples_per_channel_(process_samples_per_channel),
+ num_proc_channels_(num_process_channels),
+ output_samples_per_channel_(output_samples_per_channel),
+ samples_per_split_channel_(proc_samples_per_channel_),
num_mixed_channels_(0),
num_mixed_low_pass_channels_(0),
- data_was_mixed_(false),
- samples_per_channel_(samples_per_channel),
- samples_per_split_channel_(samples_per_channel),
reference_copied_(false),
activity_(AudioFrame::kVadUnknown),
- is_muted_(false),
- data_(NULL),
- channels_(NULL),
- split_channels_(NULL),
- mixed_channels_(NULL),
- mixed_low_pass_channels_(NULL),
- low_pass_reference_channels_(NULL) {
- if (max_num_channels_ > 1) {
- channels_.reset(new AudioChannel[max_num_channels_]);
- mixed_channels_.reset(new AudioChannel[max_num_channels_]);
- mixed_low_pass_channels_.reset(new AudioChannel[max_num_channels_]);
+ keyboard_data_(NULL),
+ channels_(new IFChannelBuffer(proc_samples_per_channel_,
+ num_proc_channels_)) {
+ assert(input_samples_per_channel_ > 0);
+ assert(proc_samples_per_channel_ > 0);
+ assert(output_samples_per_channel_ > 0);
+ assert(num_input_channels_ > 0 && num_input_channels_ <= 2);
+ assert(num_proc_channels_ <= num_input_channels);
+
+ if (num_input_channels_ == 2 && num_proc_channels_ == 1) {
+ input_buffer_.reset(new ChannelBuffer<float>(input_samples_per_channel_,
+ num_proc_channels_));
+ }
+
+ if (input_samples_per_channel_ != proc_samples_per_channel_ ||
+ output_samples_per_channel_ != proc_samples_per_channel_) {
+ // Create an intermediate buffer for resampling.
+ process_buffer_.reset(new ChannelBuffer<float>(proc_samples_per_channel_,
+ num_proc_channels_));
+ }
+
+ if (input_samples_per_channel_ != proc_samples_per_channel_) {
+ input_resamplers_.reserve(num_proc_channels_);
+ for (int i = 0; i < num_proc_channels_; ++i) {
+ input_resamplers_.push_back(
+ new PushSincResampler(input_samples_per_channel_,
+ proc_samples_per_channel_));
+ }
}
- low_pass_reference_channels_.reset(new AudioChannel[max_num_channels_]);
- if (samples_per_channel_ == kSamplesPer32kHzChannel) {
- split_channels_.reset(new SplitAudioChannel[max_num_channels_]);
+ if (output_samples_per_channel_ != proc_samples_per_channel_) {
+ output_resamplers_.reserve(num_proc_channels_);
+ for (int i = 0; i < num_proc_channels_; ++i) {
+ output_resamplers_.push_back(
+ new PushSincResampler(proc_samples_per_channel_,
+ output_samples_per_channel_));
+ }
+ }
+
+ if (proc_samples_per_channel_ == kSamplesPer32kHzChannel) {
samples_per_split_channel_ = kSamplesPer16kHzChannel;
+ split_channels_.reset(new SplitChannelBuffer(samples_per_split_channel_,
+ num_proc_channels_));
+ filter_states_.reset(new SplitFilterStates[num_proc_channels_]);
}
}
AudioBuffer::~AudioBuffer() {}
-int16_t* AudioBuffer::data(int channel) const {
- assert(channel >= 0 && channel < num_channels_);
- if (data_ != NULL) {
- return data_;
+void AudioBuffer::CopyFrom(const float* const* data,
+ int samples_per_channel,
+ AudioProcessing::ChannelLayout layout) {
+ assert(samples_per_channel == input_samples_per_channel_);
+ assert(ChannelsFromLayout(layout) == num_input_channels_);
+ InitForNewData();
+
+ if (HasKeyboardChannel(layout)) {
+ keyboard_data_ = data[KeyboardChannelIndex(layout)];
}
- return channels_[channel].data;
-}
+ // Downmix.
+ const float* const* data_ptr = data;
+ if (num_input_channels_ == 2 && num_proc_channels_ == 1) {
+ StereoToMono(data[0],
+ data[1],
+ input_buffer_->channel(0),
+ input_samples_per_channel_);
+ data_ptr = input_buffer_->channels();
+ }
-int16_t* AudioBuffer::low_pass_split_data(int channel) const {
- assert(channel >= 0 && channel < num_channels_);
- if (split_channels_.get() == NULL) {
- return data(channel);
+ // Resample.
+ if (input_samples_per_channel_ != proc_samples_per_channel_) {
+ for (int i = 0; i < num_proc_channels_; ++i) {
+ input_resamplers_[i]->Resample(data_ptr[i],
+ input_samples_per_channel_,
+ process_buffer_->channel(i),
+ proc_samples_per_channel_);
+ }
+ data_ptr = process_buffer_->channels();
}
- return split_channels_[channel].low_pass_data;
+ // Convert to int16.
+ for (int i = 0; i < num_proc_channels_; ++i) {
+ ScaleAndRoundToInt16(data_ptr[i], proc_samples_per_channel_,
+ channels_->ibuf()->channel(i));
+ }
}
-int16_t* AudioBuffer::high_pass_split_data(int channel) const {
- assert(channel >= 0 && channel < num_channels_);
- if (split_channels_.get() == NULL) {
- return NULL;
+void AudioBuffer::CopyTo(int samples_per_channel,
+ AudioProcessing::ChannelLayout layout,
+ float* const* data) {
+ assert(samples_per_channel == output_samples_per_channel_);
+ assert(ChannelsFromLayout(layout) == num_proc_channels_);
+
+ // Convert to float.
+ float* const* data_ptr = data;
+ if (output_samples_per_channel_ != proc_samples_per_channel_) {
+ // Convert to an intermediate buffer for subsequent resampling.
+ data_ptr = process_buffer_->channels();
+ }
+ for (int i = 0; i < num_proc_channels_; ++i) {
+ ScaleToFloat(channels_->ibuf()->channel(i),
+ proc_samples_per_channel_,
+ data_ptr[i]);
+ }
+
+ // Resample.
+ if (output_samples_per_channel_ != proc_samples_per_channel_) {
+ for (int i = 0; i < num_proc_channels_; ++i) {
+ output_resamplers_[i]->Resample(data_ptr[i],
+ proc_samples_per_channel_,
+ data[i],
+ output_samples_per_channel_);
+ }
}
+}
- return split_channels_[channel].high_pass_data;
+void AudioBuffer::InitForNewData() {
+ keyboard_data_ = NULL;
+ num_mixed_channels_ = 0;
+ num_mixed_low_pass_channels_ = 0;
+ reference_copied_ = false;
+ activity_ = AudioFrame::kVadUnknown;
}
-int16_t* AudioBuffer::mixed_data(int channel) const {
+const int16_t* AudioBuffer::data(int channel) const {
+ assert(channel >= 0 && channel < num_proc_channels_);
+ return channels_->ibuf()->channel(channel);
+}
+
+int16_t* AudioBuffer::data(int channel) {
+ const AudioBuffer* t = this;
+ return const_cast<int16_t*>(t->data(channel));
+}
+
+float* AudioBuffer::data_f(int channel) {
+ assert(channel >= 0 && channel < num_proc_channels_);
+ return channels_->fbuf()->channel(channel);
+}
+
+const int16_t* AudioBuffer::low_pass_split_data(int channel) const {
+ assert(channel >= 0 && channel < num_proc_channels_);
+ return split_channels_.get() ? split_channels_->low_channel(channel)
+ : data(channel);
+}
+
+int16_t* AudioBuffer::low_pass_split_data(int channel) {
+ const AudioBuffer* t = this;
+ return const_cast<int16_t*>(t->low_pass_split_data(channel));
+}
+
+float* AudioBuffer::low_pass_split_data_f(int channel) {
+ assert(channel >= 0 && channel < num_proc_channels_);
+ return split_channels_.get() ? split_channels_->low_channel_f(channel)
+ : data_f(channel);
+}
+
+const int16_t* AudioBuffer::high_pass_split_data(int channel) const {
+ assert(channel >= 0 && channel < num_proc_channels_);
+ return split_channels_.get() ? split_channels_->high_channel(channel) : NULL;
+}
+
+int16_t* AudioBuffer::high_pass_split_data(int channel) {
+ const AudioBuffer* t = this;
+ return const_cast<int16_t*>(t->high_pass_split_data(channel));
+}
+
+float* AudioBuffer::high_pass_split_data_f(int channel) {
+ assert(channel >= 0 && channel < num_proc_channels_);
+ return split_channels_.get() ? split_channels_->high_channel_f(channel)
+ : NULL;
+}
+
+const int16_t* AudioBuffer::mixed_data(int channel) const {
assert(channel >= 0 && channel < num_mixed_channels_);
- return mixed_channels_[channel].data;
+ return mixed_channels_->channel(channel);
}
-int16_t* AudioBuffer::mixed_low_pass_data(int channel) const {
+const int16_t* AudioBuffer::mixed_low_pass_data(int channel) const {
assert(channel >= 0 && channel < num_mixed_low_pass_channels_);
- return mixed_low_pass_channels_[channel].data;
+ return mixed_low_pass_channels_->channel(channel);
}
-int16_t* AudioBuffer::low_pass_reference(int channel) const {
- assert(channel >= 0 && channel < num_channels_);
+const int16_t* AudioBuffer::low_pass_reference(int channel) const {
+ assert(channel >= 0 && channel < num_proc_channels_);
if (!reference_copied_) {
return NULL;
}
- return low_pass_reference_channels_[channel].data;
-}
-
-int32_t* AudioBuffer::analysis_filter_state1(int channel) const {
- assert(channel >= 0 && channel < num_channels_);
- return split_channels_[channel].analysis_filter_state1;
-}
-
-int32_t* AudioBuffer::analysis_filter_state2(int channel) const {
- assert(channel >= 0 && channel < num_channels_);
- return split_channels_[channel].analysis_filter_state2;
+ return low_pass_reference_channels_->channel(channel);
}
-int32_t* AudioBuffer::synthesis_filter_state1(int channel) const {
- assert(channel >= 0 && channel < num_channels_);
- return split_channels_[channel].synthesis_filter_state1;
+const float* AudioBuffer::keyboard_data() const {
+ return keyboard_data_;
}
-int32_t* AudioBuffer::synthesis_filter_state2(int channel) const {
- assert(channel >= 0 && channel < num_channels_);
- return split_channels_[channel].synthesis_filter_state2;
+SplitFilterStates* AudioBuffer::filter_states(int channel) {
+ assert(channel >= 0 && channel < num_proc_channels_);
+ return &filter_states_[channel];
}
void AudioBuffer::set_activity(AudioFrame::VADActivity activity) {
@@ -170,126 +370,96 @@ AudioFrame::VADActivity AudioBuffer::activity() const {
return activity_;
}
-bool AudioBuffer::is_muted() const {
- return is_muted_;
-}
-
int AudioBuffer::num_channels() const {
- return num_channels_;
+ return num_proc_channels_;
}
int AudioBuffer::samples_per_channel() const {
- return samples_per_channel_;
+ return proc_samples_per_channel_;
}
int AudioBuffer::samples_per_split_channel() const {
return samples_per_split_channel_;
}
+int AudioBuffer::samples_per_keyboard_channel() const {
+ // We don't resample the keyboard channel.
+ return input_samples_per_channel_;
+}
+
// TODO(andrew): Do deinterleaving and mixing in one step?
void AudioBuffer::DeinterleaveFrom(AudioFrame* frame) {
- assert(frame->num_channels_ <= max_num_channels_);
- assert(frame->samples_per_channel_ == samples_per_channel_);
-
- num_channels_ = frame->num_channels_;
- data_was_mixed_ = false;
- num_mixed_channels_ = 0;
- num_mixed_low_pass_channels_ = 0;
- reference_copied_ = false;
+ assert(proc_samples_per_channel_ == input_samples_per_channel_);
+ assert(num_proc_channels_ == num_input_channels_);
+ assert(frame->num_channels_ == num_proc_channels_);
+ assert(frame->samples_per_channel_ == proc_samples_per_channel_);
+ InitForNewData();
activity_ = frame->vad_activity_;
- is_muted_ = false;
- if (frame->energy_ == 0) {
- is_muted_ = true;
- }
-
- if (num_channels_ == 1) {
- // We can get away with a pointer assignment in this case.
- data_ = frame->data_;
- return;
- }
int16_t* interleaved = frame->data_;
- for (int i = 0; i < num_channels_; i++) {
- int16_t* deinterleaved = channels_[i].data;
+ for (int i = 0; i < num_proc_channels_; i++) {
+ int16_t* deinterleaved = channels_->ibuf()->channel(i);
int interleaved_idx = i;
- for (int j = 0; j < samples_per_channel_; j++) {
+ for (int j = 0; j < proc_samples_per_channel_; j++) {
deinterleaved[j] = interleaved[interleaved_idx];
- interleaved_idx += num_channels_;
+ interleaved_idx += num_proc_channels_;
}
}
}
void AudioBuffer::InterleaveTo(AudioFrame* frame, bool data_changed) const {
- assert(frame->num_channels_ == num_channels_);
- assert(frame->samples_per_channel_ == samples_per_channel_);
+ assert(proc_samples_per_channel_ == output_samples_per_channel_);
+ assert(num_proc_channels_ == num_input_channels_);
+ assert(frame->num_channels_ == num_proc_channels_);
+ assert(frame->samples_per_channel_ == proc_samples_per_channel_);
frame->vad_activity_ = activity_;
if (!data_changed) {
return;
}
- if (num_channels_ == 1) {
- if (data_was_mixed_) {
- memcpy(frame->data_,
- channels_[0].data,
- sizeof(int16_t) * samples_per_channel_);
- } else {
- // These should point to the same buffer in this case.
- assert(data_ == frame->data_);
- }
-
- return;
- }
-
int16_t* interleaved = frame->data_;
- for (int i = 0; i < num_channels_; i++) {
- int16_t* deinterleaved = channels_[i].data;
+ for (int i = 0; i < num_proc_channels_; i++) {
+ int16_t* deinterleaved = channels_->ibuf()->channel(i);
int interleaved_idx = i;
- for (int j = 0; j < samples_per_channel_; j++) {
+ for (int j = 0; j < proc_samples_per_channel_; j++) {
interleaved[interleaved_idx] = deinterleaved[j];
- interleaved_idx += num_channels_;
+ interleaved_idx += num_proc_channels_;
}
}
}
-// TODO(andrew): would be good to support the no-mix case with pointer
-// assignment.
-// TODO(andrew): handle mixing to multiple channels?
-void AudioBuffer::Mix(int num_mixed_channels) {
- // We currently only support the stereo to mono case.
- assert(num_channels_ == 2);
- assert(num_mixed_channels == 1);
-
- StereoToMono(channels_[0].data,
- channels_[1].data,
- channels_[0].data,
- samples_per_channel_);
-
- num_channels_ = num_mixed_channels;
- data_was_mixed_ = true;
-}
-
void AudioBuffer::CopyAndMix(int num_mixed_channels) {
// We currently only support the stereo to mono case.
- assert(num_channels_ == 2);
+ assert(num_proc_channels_ == 2);
assert(num_mixed_channels == 1);
+ if (!mixed_channels_.get()) {
+ mixed_channels_.reset(
+ new ChannelBuffer<int16_t>(proc_samples_per_channel_,
+ num_mixed_channels));
+ }
- StereoToMono(channels_[0].data,
- channels_[1].data,
- mixed_channels_[0].data,
- samples_per_channel_);
+ StereoToMono(channels_->ibuf()->channel(0),
+ channels_->ibuf()->channel(1),
+ mixed_channels_->channel(0),
+ proc_samples_per_channel_);
num_mixed_channels_ = num_mixed_channels;
}
void AudioBuffer::CopyAndMixLowPass(int num_mixed_channels) {
// We currently only support the stereo to mono case.
- assert(num_channels_ == 2);
+ assert(num_proc_channels_ == 2);
assert(num_mixed_channels == 1);
+ if (!mixed_low_pass_channels_.get()) {
+ mixed_low_pass_channels_.reset(
+ new ChannelBuffer<int16_t>(samples_per_split_channel_,
+ num_mixed_channels));
+ }
StereoToMono(low_pass_split_data(0),
low_pass_split_data(1),
- mixed_low_pass_channels_[0].data,
+ mixed_low_pass_channels_->channel(0),
samples_per_split_channel_);
num_mixed_low_pass_channels_ = num_mixed_channels;
@@ -297,10 +467,14 @@ void AudioBuffer::CopyAndMixLowPass(int num_mixed_channels) {
void AudioBuffer::CopyLowPassToReference() {
reference_copied_ = true;
- for (int i = 0; i < num_channels_; i++) {
- memcpy(low_pass_reference_channels_[i].data,
- low_pass_split_data(i),
- sizeof(int16_t) * samples_per_split_channel_);
+ if (!low_pass_reference_channels_.get()) {
+ low_pass_reference_channels_.reset(
+ new ChannelBuffer<int16_t>(samples_per_split_channel_,
+ num_proc_channels_));
+ }
+ for (int i = 0; i < num_proc_channels_; i++) {
+ low_pass_reference_channels_->CopyFrom(low_pass_split_data(i), i);
}
}
+
} // namespace webrtc
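The heart of the rewrite above is IFChannelBuffer's pair of validity flags: at least one representation is always current, and handing out a mutable view of one representation invalidates the other. A stripped-down, single-sample sketch of the same invariant (C++11; class name illustrative, and the plain cast stands in for the saturating conversion the real class performs):

#include <stdint.h>

class IFSample {
 public:
  int16_t* i() { RefreshI(); fvalid_ = false; return &i_; }
  float* f() { RefreshF(); ivalid_ = false; return &f_; }

 private:
  // At most one refresh can be pending at a time; the asserts in the real
  // class encode the same invariant.
  void RefreshI() {
    if (!ivalid_) { i_ = static_cast<int16_t>(f_); ivalid_ = true; }
  }
  void RefreshF() {
    if (!fvalid_) { f_ = static_cast<float>(i_); fvalid_ = true; }
  }

  bool ivalid_ = true;
  bool fvalid_ = true;
  int16_t i_ = 0;
  float f_ = 0.f;
};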
diff --git a/chromium/third_party/webrtc/modules/audio_processing/audio_buffer.h b/chromium/third_party/webrtc/modules/audio_processing/audio_buffer.h
index 2638bef6058..67e4f485043 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/audio_buffer.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/audio_buffer.h
@@ -8,75 +8,124 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_AUDIO_BUFFER_H_
-#define WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_AUDIO_BUFFER_H_
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AUDIO_BUFFER_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_AUDIO_BUFFER_H_
+#include <vector>
+
+#include "webrtc/modules/audio_processing/common.h"
+#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
+#include "webrtc/system_wrappers/interface/scoped_vector.h"
#include "webrtc/typedefs.h"
namespace webrtc {
-struct AudioChannel;
-struct SplitAudioChannel;
+class PushSincResampler;
+class SplitChannelBuffer;
+class IFChannelBuffer;
+
+struct SplitFilterStates {
+ SplitFilterStates() {
+ memset(analysis_filter_state1, 0, sizeof(analysis_filter_state1));
+ memset(analysis_filter_state2, 0, sizeof(analysis_filter_state2));
+ memset(synthesis_filter_state1, 0, sizeof(synthesis_filter_state1));
+ memset(synthesis_filter_state2, 0, sizeof(synthesis_filter_state2));
+ }
+
+ static const int kStateSize = 6;
+ int analysis_filter_state1[kStateSize];
+ int analysis_filter_state2[kStateSize];
+ int synthesis_filter_state1[kStateSize];
+ int synthesis_filter_state2[kStateSize];
+};
class AudioBuffer {
public:
- AudioBuffer(int max_num_channels, int samples_per_channel);
+ // TODO(ajm): Switch to take ChannelLayouts.
+ AudioBuffer(int input_samples_per_channel,
+ int num_input_channels,
+ int process_samples_per_channel,
+ int num_process_channels,
+ int output_samples_per_channel);
virtual ~AudioBuffer();
int num_channels() const;
int samples_per_channel() const;
int samples_per_split_channel() const;
+ int samples_per_keyboard_channel() const;
- int16_t* data(int channel) const;
- int16_t* low_pass_split_data(int channel) const;
- int16_t* high_pass_split_data(int channel) const;
- int16_t* mixed_data(int channel) const;
- int16_t* mixed_low_pass_data(int channel) const;
- int16_t* low_pass_reference(int channel) const;
+ int16_t* data(int channel);
+ const int16_t* data(int channel) const;
+ int16_t* low_pass_split_data(int channel);
+ const int16_t* low_pass_split_data(int channel) const;
+ int16_t* high_pass_split_data(int channel);
+ const int16_t* high_pass_split_data(int channel) const;
+ const int16_t* mixed_data(int channel) const;
+ const int16_t* mixed_low_pass_data(int channel) const;
+ const int16_t* low_pass_reference(int channel) const;
- int32_t* analysis_filter_state1(int channel) const;
- int32_t* analysis_filter_state2(int channel) const;
- int32_t* synthesis_filter_state1(int channel) const;
- int32_t* synthesis_filter_state2(int channel) const;
+ // Float versions of the accessors, with automatic conversion back and forth
+ // as necessary. The range of the numbers is the same as for int16_t.
+ float* data_f(int channel);
+ float* low_pass_split_data_f(int channel);
+ float* high_pass_split_data_f(int channel);
+
+ const float* keyboard_data() const;
+
+ SplitFilterStates* filter_states(int channel);
void set_activity(AudioFrame::VADActivity activity);
AudioFrame::VADActivity activity() const;
- bool is_muted() const;
-
+ // Use for int16 interleaved data.
void DeinterleaveFrom(AudioFrame* audioFrame);
void InterleaveTo(AudioFrame* audioFrame) const;
// If |data_changed| is false, only the non-audio data members will be copied
// to |frame|.
void InterleaveTo(AudioFrame* frame, bool data_changed) const;
- void Mix(int num_mixed_channels);
+
+ // Use for float deinterleaved data.
+ void CopyFrom(const float* const* data,
+ int samples_per_channel,
+ AudioProcessing::ChannelLayout layout);
+ void CopyTo(int samples_per_channel,
+ AudioProcessing::ChannelLayout layout,
+ float* const* data);
+
void CopyAndMix(int num_mixed_channels);
void CopyAndMixLowPass(int num_mixed_channels);
void CopyLowPassToReference();
private:
- const int max_num_channels_;
- int num_channels_;
+ // Called from DeinterleaveFrom() and CopyFrom().
+ void InitForNewData();
+
+ const int input_samples_per_channel_;
+ const int num_input_channels_;
+ const int proc_samples_per_channel_;
+ const int num_proc_channels_;
+ const int output_samples_per_channel_;
+ int samples_per_split_channel_;
int num_mixed_channels_;
int num_mixed_low_pass_channels_;
- // Whether the original data was replaced with mixed data.
- bool data_was_mixed_;
- const int samples_per_channel_;
- int samples_per_split_channel_;
bool reference_copied_;
AudioFrame::VADActivity activity_;
- bool is_muted_;
-
- int16_t* data_;
- scoped_array<AudioChannel> channels_;
- scoped_array<SplitAudioChannel> split_channels_;
- scoped_array<AudioChannel> mixed_channels_;
- // TODO(andrew): improve this, we don't need the full 32 kHz space here.
- scoped_array<AudioChannel> mixed_low_pass_channels_;
- scoped_array<AudioChannel> low_pass_reference_channels_;
+
+ const float* keyboard_data_;
+ scoped_ptr<IFChannelBuffer> channels_;
+ scoped_ptr<SplitChannelBuffer> split_channels_;
+ scoped_ptr<SplitFilterStates[]> filter_states_;
+ scoped_ptr<ChannelBuffer<int16_t> > mixed_channels_;
+ scoped_ptr<ChannelBuffer<int16_t> > mixed_low_pass_channels_;
+ scoped_ptr<ChannelBuffer<int16_t> > low_pass_reference_channels_;
+ scoped_ptr<ChannelBuffer<float> > input_buffer_;
+ scoped_ptr<ChannelBuffer<float> > process_buffer_;
+ ScopedVector<PushSincResampler> input_resamplers_;
+ ScopedVector<PushSincResampler> output_resamplers_;
};
+
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_AUDIO_BUFFER_H_
+#endif // WEBRTC_MODULES_AUDIO_PROCESSING_AUDIO_BUFFER_H_
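One pattern worth noting in the accessor pairs above: each mutable overload is implemented in the .cc by delegating to its const sibling through a const_cast, so the channel-lookup logic exists exactly once. A minimal sketch of the idiom (illustrative class):

#include <stdint.h>

class Buffer {
 public:
  const int16_t* channel(int i) const { return &storage_[i * kSamples]; }
  int16_t* channel(int i) {
    const Buffer* self = this;  // Route through the const overload.
    return const_cast<int16_t*>(self->channel(i));
  }

 private:
  static const int kSamples = 160;
  int16_t storage_[2 * kSamples];
};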
diff --git a/chromium/third_party/webrtc/modules/audio_processing/audio_processing.gypi b/chromium/third_party/webrtc/modules/audio_processing/audio_processing.gypi
index 336b4eee75f..b1d18c5b06a 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/audio_processing.gypi
+++ b/chromium/third_party/webrtc/modules/audio_processing/audio_processing.gypi
@@ -12,6 +12,7 @@
'<(webrtc_root)/common_audio/common_audio.gyp:common_audio',
'<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
],
+ 'shared_generated_dir': '<(SHARED_INTERMEDIATE_DIR)/audio_processing/asm_offsets',
},
'targets': [
{
@@ -53,9 +54,9 @@
'audio_buffer.h',
'audio_processing_impl.cc',
'audio_processing_impl.h',
+ 'common.h',
'echo_cancellation_impl.cc',
'echo_cancellation_impl.h',
- 'echo_cancellation_impl_wrapper.h',
'echo_control_mobile_impl.cc',
'echo_control_mobile_impl.h',
'gain_control_impl.cc',
@@ -67,10 +68,12 @@
'level_estimator_impl.h',
'noise_suppression_impl.cc',
'noise_suppression_impl.h',
- 'splitting_filter.cc',
- 'splitting_filter.h',
'processing_component.cc',
'processing_component.h',
+ 'rms_level.cc',
+ 'rms_level.h',
+ 'typing_detection.cc',
+ 'typing_detection.h',
'utility/delay_estimator.c',
'utility/delay_estimator.h',
'utility/delay_estimator_internal.h',
@@ -103,6 +106,17 @@
'ns/nsx_core.h',
'ns/nsx_defines.h',
],
+ 'conditions': [
+ ['target_arch=="mipsel"', {
+ 'sources': [
+ 'ns/nsx_core_mips.c',
+ ],
+ }, {
+ 'sources': [
+ 'ns/nsx_core_c.c',
+ ],
+ }],
+ ],
}, {
'defines': ['WEBRTC_NS_FLOAT'],
'sources': [
@@ -124,6 +138,14 @@
'sources': [
'aecm/aecm_core_mips.c',
],
+ 'conditions': [
+ ['mips_fpu==1', {
+ 'sources': [
+ 'aec/aec_core_mips.c',
+ 'aec/aec_rdft_mips.c',
+ ],
+ }],
+ ],
}, {
'sources': [
'aecm/aecm_core_c.c',
@@ -177,18 +199,22 @@
'<(webrtc_root)/common_audio/common_audio.gyp:common_audio',
],
'sources': [
+ 'aec/aec_core_neon.c',
'aecm/aecm_core_neon.c',
'ns/nsx_core_neon.c',
],
'conditions': [
['OS=="android" or OS=="ios"', {
'dependencies': [
- 'audio_processing_offsets',
+ '<(gen_core_neon_offsets_gyp):*',
],
'sources': [
'aecm/aecm_core_neon.S',
'ns/nsx_core_neon.S',
],
+ 'include_dirs': [
+ '<(shared_generated_dir)',
+ ],
'sources!': [
'aecm/aecm_core_neon.c',
'ns/nsx_core_neon.c',
@@ -197,22 +223,6 @@
}],
],
}],
- 'conditions': [
- ['OS=="android" or OS=="ios"', {
- 'targets': [{
- 'target_name': 'audio_processing_offsets',
- 'type': 'none',
- 'sources': [
- 'aecm/aecm_core_neon_offsets.c',
- 'ns/nsx_core_neon_offsets.c',
- ],
- 'variables': {
- 'asm_header_dir': 'asm_offsets',
- },
- 'includes': ['../../build/generate_asm_header.gypi',],
- }],
- }],
- ],
}],
],
}
diff --git a/chromium/third_party/webrtc/modules/audio_processing/audio_processing_impl.cc b/chromium/third_party/webrtc/modules/audio_processing/audio_processing_impl.cc
index 4d36ff7e7b4..de387edb2f5 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/audio_processing_impl.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/audio_processing_impl.cc
@@ -12,17 +12,20 @@
#include <assert.h>
+#include "webrtc/common_audio/include/audio_util.h"
+#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/modules/audio_processing/audio_buffer.h"
-#include "webrtc/modules/audio_processing/echo_cancellation_impl_wrapper.h"
+#include "webrtc/modules/audio_processing/common.h"
+#include "webrtc/modules/audio_processing/echo_cancellation_impl.h"
#include "webrtc/modules/audio_processing/echo_control_mobile_impl.h"
#include "webrtc/modules/audio_processing/gain_control_impl.h"
#include "webrtc/modules/audio_processing/high_pass_filter_impl.h"
#include "webrtc/modules/audio_processing/level_estimator_impl.h"
#include "webrtc/modules/audio_processing/noise_suppression_impl.h"
#include "webrtc/modules/audio_processing/processing_component.h"
-#include "webrtc/modules/audio_processing/splitting_filter.h"
#include "webrtc/modules/audio_processing/voice_detection_impl.h"
#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/system_wrappers/interface/compile_assert.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/file_wrapper.h"
#include "webrtc/system_wrappers/interface/logging.h"
@@ -36,9 +39,30 @@
#endif
#endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
+#define RETURN_ON_ERR(expr) \
+ do { \
+ int err = expr; \
+ if (err != kNoError) { \
+ return err; \
+ } \
+ } while (0)
+
namespace webrtc {
+
+// Throughout webrtc, it's assumed that success is represented by zero.
+COMPILE_ASSERT(AudioProcessing::kNoError == 0, no_error_must_be_zero);
+
AudioProcessing* AudioProcessing::Create(int id) {
- AudioProcessingImpl* apm = new AudioProcessingImpl(id);
+ return Create();
+}
+
+AudioProcessing* AudioProcessing::Create() {
+ Config config;
+ return Create(config);
+}
+
+AudioProcessing* AudioProcessing::Create(const Config& config) {
+ AudioProcessingImpl* apm = new AudioProcessingImpl(config);
if (apm->Initialize() != kNoError) {
delete apm;
apm = NULL;
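The RETURN_ON_ERR macro introduced above leans on the standard do { ... } while (0) wrapper so that it expands to exactly one statement. A sketch of the parse it protects against (DoWork() is a hypothetical callee returning 0 on success):

int DoWork();  // Hypothetical; 0 on success.

int MaybeProcess(bool enabled) {
  if (enabled)
    RETURN_ON_ERR(DoWork());  // Expands as a single statement, so...
  else
    return -1;                // ...this else still binds to the if above.
  return 0;
}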
@@ -47,12 +71,8 @@ AudioProcessing* AudioProcessing::Create(int id) {
return apm;
}
-int32_t AudioProcessing::TimeUntilNextProcess() { return -1; }
-int32_t AudioProcessing::Process() { return -1; }
-
-AudioProcessingImpl::AudioProcessingImpl(int id)
- : id_(id),
- echo_cancellation_(NULL),
+AudioProcessingImpl::AudioProcessingImpl(const Config& config)
+ : echo_cancellation_(NULL),
echo_control_mobile_(NULL),
gain_control_(NULL),
high_pass_filter_(NULL),
@@ -60,41 +80,43 @@ AudioProcessingImpl::AudioProcessingImpl(int id)
noise_suppression_(NULL),
voice_detection_(NULL),
crit_(CriticalSectionWrapper::CreateCriticalSection()),
- render_audio_(NULL),
- capture_audio_(NULL),
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
debug_file_(FileWrapper::Create()),
event_msg_(new audioproc::Event()),
#endif
- sample_rate_hz_(kSampleRate16kHz),
- split_sample_rate_hz_(kSampleRate16kHz),
- samples_per_channel_(sample_rate_hz_ / 100),
+ fwd_in_format_(kSampleRate16kHz, 1),
+ fwd_proc_format_(kSampleRate16kHz, 1),
+ fwd_out_format_(kSampleRate16kHz),
+ rev_in_format_(kSampleRate16kHz, 1),
+ rev_proc_format_(kSampleRate16kHz, 1),
+ split_rate_(kSampleRate16kHz),
stream_delay_ms_(0),
delay_offset_ms_(0),
was_stream_delay_set_(false),
- num_reverse_channels_(1),
- num_input_channels_(1),
- num_output_channels_(1) {
- echo_cancellation_ = EchoCancellationImplWrapper::Create(this);
+ output_will_be_muted_(false),
+ key_pressed_(false) {
+ echo_cancellation_ = new EchoCancellationImpl(this, crit_);
component_list_.push_back(echo_cancellation_);
- echo_control_mobile_ = new EchoControlMobileImpl(this);
+ echo_control_mobile_ = new EchoControlMobileImpl(this, crit_);
component_list_.push_back(echo_control_mobile_);
- gain_control_ = new GainControlImpl(this);
+ gain_control_ = new GainControlImpl(this, crit_);
component_list_.push_back(gain_control_);
- high_pass_filter_ = new HighPassFilterImpl(this);
+ high_pass_filter_ = new HighPassFilterImpl(this, crit_);
component_list_.push_back(high_pass_filter_);
- level_estimator_ = new LevelEstimatorImpl(this);
+ level_estimator_ = new LevelEstimatorImpl(this, crit_);
component_list_.push_back(level_estimator_);
- noise_suppression_ = new NoiseSuppressionImpl(this);
+ noise_suppression_ = new NoiseSuppressionImpl(this, crit_);
component_list_.push_back(noise_suppression_);
- voice_detection_ = new VoiceDetectionImpl(this);
+ voice_detection_ = new VoiceDetectionImpl(this, crit_);
component_list_.push_back(voice_detection_);
+
+ SetExtraOptions(config);
}
AudioProcessingImpl::~AudioProcessingImpl() {
@@ -112,52 +134,52 @@ AudioProcessingImpl::~AudioProcessingImpl() {
debug_file_->CloseFile();
}
#endif
-
- if (render_audio_) {
- delete render_audio_;
- render_audio_ = NULL;
- }
-
- if (capture_audio_) {
- delete capture_audio_;
- capture_audio_ = NULL;
- }
}
-
delete crit_;
crit_ = NULL;
}
-CriticalSectionWrapper* AudioProcessingImpl::crit() const {
- return crit_;
-}
-
-int AudioProcessingImpl::split_sample_rate_hz() const {
- return split_sample_rate_hz_;
-}
-
int AudioProcessingImpl::Initialize() {
CriticalSectionScoped crit_scoped(crit_);
return InitializeLocked();
}
-int AudioProcessingImpl::InitializeLocked() {
- if (render_audio_ != NULL) {
- delete render_audio_;
- render_audio_ = NULL;
- }
-
- if (capture_audio_ != NULL) {
- delete capture_audio_;
- capture_audio_ = NULL;
- }
-
- render_audio_ = new AudioBuffer(num_reverse_channels_,
- samples_per_channel_);
- capture_audio_ = new AudioBuffer(num_input_channels_,
- samples_per_channel_);
+int AudioProcessingImpl::set_sample_rate_hz(int rate) {
+ CriticalSectionScoped crit_scoped(crit_);
+ return InitializeLocked(rate,
+ rate,
+ rev_in_format_.rate(),
+ fwd_in_format_.num_channels(),
+ fwd_proc_format_.num_channels(),
+ rev_in_format_.num_channels());
+}
+
+int AudioProcessingImpl::Initialize(int input_sample_rate_hz,
+ int output_sample_rate_hz,
+ int reverse_sample_rate_hz,
+ ChannelLayout input_layout,
+ ChannelLayout output_layout,
+ ChannelLayout reverse_layout) {
+ CriticalSectionScoped crit_scoped(crit_);
+ return InitializeLocked(input_sample_rate_hz,
+ output_sample_rate_hz,
+ reverse_sample_rate_hz,
+ ChannelsFromLayout(input_layout),
+ ChannelsFromLayout(output_layout),
+ ChannelsFromLayout(reverse_layout));
+}
- was_stream_delay_set_ = false;
+int AudioProcessingImpl::InitializeLocked() {
+ render_audio_.reset(new AudioBuffer(rev_in_format_.samples_per_channel(),
+ rev_in_format_.num_channels(),
+ rev_proc_format_.samples_per_channel(),
+ rev_proc_format_.num_channels(),
+ rev_proc_format_.samples_per_channel()));
+ capture_audio_.reset(new AudioBuffer(fwd_in_format_.samples_per_channel(),
+ fwd_in_format_.num_channels(),
+ fwd_proc_format_.samples_per_channel(),
+ fwd_proc_format_.num_channels(),
+ fwd_out_format_.samples_per_channel()));
// Initialize all components.
std::list<ProcessingComponent*>::iterator it;
@@ -180,115 +202,228 @@ int AudioProcessingImpl::InitializeLocked() {
return kNoError;
}
-void AudioProcessingImpl::SetExtraOptions(const Config& config) {
- std::list<ProcessingComponent*>::iterator it;
- for (it = component_list_.begin(); it != component_list_.end(); ++it)
- (*it)->SetExtraOptions(config);
-}
+int AudioProcessingImpl::InitializeLocked(int input_sample_rate_hz,
+ int output_sample_rate_hz,
+ int reverse_sample_rate_hz,
+ int num_input_channels,
+ int num_output_channels,
+ int num_reverse_channels) {
+ if (input_sample_rate_hz <= 0 ||
+ output_sample_rate_hz <= 0 ||
+ reverse_sample_rate_hz <= 0) {
+ return kBadSampleRateError;
+ }
+ if (num_output_channels > num_input_channels) {
+ return kBadNumberChannelsError;
+ }
+ // Only mono and stereo supported currently.
+ if (num_input_channels > 2 || num_input_channels < 1 ||
+ num_output_channels > 2 || num_output_channels < 1 ||
+ num_reverse_channels > 2 || num_reverse_channels < 1) {
+ return kBadNumberChannelsError;
+ }
-int AudioProcessingImpl::EnableExperimentalNs(bool enable) {
- return kNoError;
-}
+ fwd_in_format_.set(input_sample_rate_hz, num_input_channels);
+ fwd_out_format_.set(output_sample_rate_hz);
+ rev_in_format_.set(reverse_sample_rate_hz, num_reverse_channels);
-int AudioProcessingImpl::set_sample_rate_hz(int rate) {
- CriticalSectionScoped crit_scoped(crit_);
- if (rate == sample_rate_hz_) {
- return kNoError;
+ // We process at the closest native rate >= min(input rate, output rate)...
+ int min_proc_rate = std::min(fwd_in_format_.rate(), fwd_out_format_.rate());
+ int fwd_proc_rate;
+ if (min_proc_rate > kSampleRate16kHz) {
+ fwd_proc_rate = kSampleRate32kHz;
+ } else if (min_proc_rate > kSampleRate8kHz) {
+ fwd_proc_rate = kSampleRate16kHz;
+ } else {
+ fwd_proc_rate = kSampleRate8kHz;
}
- if (rate != kSampleRate8kHz &&
- rate != kSampleRate16kHz &&
- rate != kSampleRate32kHz) {
- return kBadParameterError;
+ // ...with one exception.
+ if (echo_control_mobile_->is_enabled() && min_proc_rate > kSampleRate16kHz) {
+ fwd_proc_rate = kSampleRate16kHz;
}
- if (echo_control_mobile_->is_enabled() && rate > kSampleRate16kHz) {
- LOG(LS_ERROR) << "AECM only supports 16 kHz or lower sample rates";
- return kUnsupportedComponentError;
+
+ fwd_proc_format_.set(fwd_proc_rate, num_output_channels);
+
+ // We normally process the reverse stream at 16 kHz. Unless...
+ int rev_proc_rate = kSampleRate16kHz;
+ if (fwd_proc_format_.rate() == kSampleRate8kHz) {
+ // ...the forward stream is at 8 kHz.
+ rev_proc_rate = kSampleRate8kHz;
+ } else {
+ if (rev_in_format_.rate() == kSampleRate32kHz) {
+ // ...or the input is at 32 kHz, in which case we use the splitting
+ // filter rather than the resampler.
+ rev_proc_rate = kSampleRate32kHz;
+ }
}
- sample_rate_hz_ = rate;
- samples_per_channel_ = rate / 100;
+ // TODO(ajm): Enable this.
+ // Always downmix the reverse stream to mono for analysis.
+ //rev_proc_format_.set(rev_proc_rate, 1);
+ rev_proc_format_.set(rev_proc_rate, rev_in_format_.num_channels());
- if (sample_rate_hz_ == kSampleRate32kHz) {
- split_sample_rate_hz_ = kSampleRate16kHz;
+ if (fwd_proc_format_.rate() == kSampleRate32kHz) {
+ split_rate_ = kSampleRate16kHz;
} else {
- split_sample_rate_hz_ = sample_rate_hz_;
+ split_rate_ = fwd_proc_format_.rate();
}
return InitializeLocked();
}
-int AudioProcessingImpl::sample_rate_hz() const {
+// Calls InitializeLocked() if any of the audio parameters have changed from
+// their current values.
+int AudioProcessingImpl::MaybeInitializeLocked(int input_sample_rate_hz,
+ int output_sample_rate_hz,
+ int reverse_sample_rate_hz,
+ int num_input_channels,
+ int num_output_channels,
+ int num_reverse_channels) {
+ if (input_sample_rate_hz == fwd_in_format_.rate() &&
+ output_sample_rate_hz == fwd_out_format_.rate() &&
+ reverse_sample_rate_hz == rev_in_format_.rate() &&
+ num_input_channels == fwd_in_format_.num_channels() &&
+ num_output_channels == fwd_proc_format_.num_channels() &&
+ num_reverse_channels == rev_in_format_.num_channels()) {
+ return kNoError;
+ }
+
+ return InitializeLocked(input_sample_rate_hz,
+ output_sample_rate_hz,
+ reverse_sample_rate_hz,
+ num_input_channels,
+ num_output_channels,
+ num_reverse_channels);
+}
+
+void AudioProcessingImpl::SetExtraOptions(const Config& config) {
CriticalSectionScoped crit_scoped(crit_);
- return sample_rate_hz_;
+ std::list<ProcessingComponent*>::iterator it;
+ for (it = component_list_.begin(); it != component_list_.end(); ++it)
+ (*it)->SetExtraOptions(config);
+}
+
+int AudioProcessingImpl::EnableExperimentalNs(bool enable) {
+ return kNoError;
}
-int AudioProcessingImpl::set_num_reverse_channels(int channels) {
+int AudioProcessingImpl::input_sample_rate_hz() const {
CriticalSectionScoped crit_scoped(crit_);
- if (channels == num_reverse_channels_) {
- return kNoError;
- }
- // Only stereo supported currently.
- if (channels > 2 || channels < 1) {
- return kBadParameterError;
- }
+ return fwd_in_format_.rate();
+}
- num_reverse_channels_ = channels;
+int AudioProcessingImpl::sample_rate_hz() const {
+ CriticalSectionScoped crit_scoped(crit_);
+ return fwd_in_format_.rate();
+}
- return InitializeLocked();
+int AudioProcessingImpl::proc_sample_rate_hz() const {
+ return fwd_proc_format_.rate();
+}
+
+int AudioProcessingImpl::proc_split_sample_rate_hz() const {
+ return split_rate_;
}
int AudioProcessingImpl::num_reverse_channels() const {
- return num_reverse_channels_;
+ return rev_proc_format_.num_channels();
+}
+
+int AudioProcessingImpl::num_input_channels() const {
+ return fwd_in_format_.num_channels();
+}
+
+int AudioProcessingImpl::num_output_channels() const {
+ return fwd_proc_format_.num_channels();
+}
+
+void AudioProcessingImpl::set_output_will_be_muted(bool muted) {
+ output_will_be_muted_ = muted;
}
-int AudioProcessingImpl::set_num_channels(
- int input_channels,
- int output_channels) {
+bool AudioProcessingImpl::output_will_be_muted() const {
+ return output_will_be_muted_;
+}
+
+int AudioProcessingImpl::ProcessStream(const float* const* src,
+ int samples_per_channel,
+ int input_sample_rate_hz,
+ ChannelLayout input_layout,
+ int output_sample_rate_hz,
+ ChannelLayout output_layout,
+ float* const* dest) {
CriticalSectionScoped crit_scoped(crit_);
- if (input_channels == num_input_channels_ &&
- output_channels == num_output_channels_) {
- return kNoError;
- }
- if (output_channels > input_channels) {
- return kBadParameterError;
+ if (!src || !dest) {
+ return kNullPointerError;
}
- // Only stereo supported currently.
- if (input_channels > 2 || input_channels < 1 ||
- output_channels > 2 || output_channels < 1) {
- return kBadParameterError;
+
+ RETURN_ON_ERR(MaybeInitializeLocked(input_sample_rate_hz,
+ output_sample_rate_hz,
+ rev_in_format_.rate(),
+ ChannelsFromLayout(input_layout),
+ ChannelsFromLayout(output_layout),
+ rev_in_format_.num_channels()));
+ if (samples_per_channel != fwd_in_format_.samples_per_channel()) {
+ return kBadDataLengthError;
}
- num_input_channels_ = input_channels;
- num_output_channels_ = output_channels;
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
+ if (debug_file_->Open()) {
+ event_msg_->set_type(audioproc::Event::STREAM);
+ audioproc::Stream* msg = event_msg_->mutable_stream();
+ const size_t channel_size = sizeof(float) * samples_per_channel;
+ for (int i = 0; i < fwd_in_format_.num_channels(); ++i)
+ msg->add_input_channel(src[i], channel_size);
+ }
+#endif
- return InitializeLocked();
-}
+ capture_audio_->CopyFrom(src, samples_per_channel, input_layout);
+ RETURN_ON_ERR(ProcessStreamLocked());
+ if (output_copy_needed(is_data_processed())) {
+ capture_audio_->CopyTo(fwd_out_format_.samples_per_channel(),
+ output_layout,
+ dest);
+ }
-int AudioProcessingImpl::num_input_channels() const {
- return num_input_channels_;
-}
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
+ if (debug_file_->Open()) {
+ audioproc::Stream* msg = event_msg_->mutable_stream();
+ const size_t channel_size = sizeof(float) * samples_per_channel;
+ for (int i = 0; i < fwd_proc_format_.num_channels(); ++i)
+ msg->add_output_channel(dest[i], channel_size);
+ RETURN_ON_ERR(WriteMessageToDebugFile());
+ }
+#endif
-int AudioProcessingImpl::num_output_channels() const {
- return num_output_channels_;
+ return kNoError;
}
int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
CriticalSectionScoped crit_scoped(crit_);
- int err = kNoError;
-
- if (frame == NULL) {
+ if (!frame) {
return kNullPointerError;
}
-
- if (frame->sample_rate_hz_ != sample_rate_hz_) {
+ // Must be a native rate.
+ if (frame->sample_rate_hz_ != kSampleRate8kHz &&
+ frame->sample_rate_hz_ != kSampleRate16kHz &&
+ frame->sample_rate_hz_ != kSampleRate32kHz) {
return kBadSampleRateError;
}
-
- if (frame->num_channels_ != num_input_channels_) {
- return kBadNumberChannelsError;
+ if (echo_control_mobile_->is_enabled() &&
+ frame->sample_rate_hz_ > kSampleRate16kHz) {
+ LOG(LS_ERROR) << "AECM only supports 16 or 8 kHz sample rates";
+ return kUnsupportedComponentError;
}
- if (frame->samples_per_channel_ != samples_per_channel_) {
+ // TODO(ajm): The input and output rates and channels are currently
+ // constrained to be identical in the int16 interface.
+ RETURN_ON_ERR(MaybeInitializeLocked(frame->sample_rate_hz_,
+ frame->sample_rate_hz_,
+ rev_in_format_.rate(),
+ frame->num_channels_,
+ frame->num_channels_,
+ rev_in_format_.num_channels()));
+ if (frame->samples_per_channel_ != fwd_in_format_.samples_per_channel()) {
return kBadDataLengthError;
}
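The initialization path above reduces to a small decision: process at the lowest native rate that is at least min(input rate, output rate), then cap at 16 kHz when AECM is active. A condensed sketch of that choice (rates in Hz; function name illustrative):

#include <algorithm>

int ChooseFwdProcRate(int in_rate, int out_rate, bool aecm_enabled) {
  const int min_rate = std::min(in_rate, out_rate);
  int rate;
  if (min_rate > 16000)
    rate = 32000;
  else if (min_rate > 8000)
    rate = 16000;
  else
    rate = 8000;
  if (aecm_enabled && rate > 16000)
    rate = 16000;  // The one exception: AECM tops out at 16 kHz.
  return rate;
}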
@@ -300,126 +435,142 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
frame->samples_per_channel_ *
frame->num_channels_;
msg->set_input_data(frame->data_, data_size);
- msg->set_delay(stream_delay_ms_);
- msg->set_drift(echo_cancellation_->stream_drift_samples());
- msg->set_level(gain_control_->stream_analog_level());
}
#endif
capture_audio_->DeinterleaveFrom(frame);
+ RETURN_ON_ERR(ProcessStreamLocked());
+ capture_audio_->InterleaveTo(frame, output_copy_needed(is_data_processed()));
- // TODO(ajm): experiment with mixing and AEC placement.
- if (num_output_channels_ < num_input_channels_) {
- capture_audio_->Mix(num_output_channels_);
- frame->num_channels_ = num_output_channels_;
- }
-
- bool data_processed = is_data_processed();
- if (analysis_needed(data_processed)) {
- for (int i = 0; i < num_output_channels_; i++) {
- // Split into a low and high band.
- SplittingFilterAnalysis(capture_audio_->data(i),
- capture_audio_->low_pass_split_data(i),
- capture_audio_->high_pass_split_data(i),
- capture_audio_->analysis_filter_state1(i),
- capture_audio_->analysis_filter_state2(i));
- }
- }
-
- err = high_pass_filter_->ProcessCaptureAudio(capture_audio_);
- if (err != kNoError) {
- return err;
- }
-
- err = gain_control_->AnalyzeCaptureAudio(capture_audio_);
- if (err != kNoError) {
- return err;
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
+ if (debug_file_->Open()) {
+ audioproc::Stream* msg = event_msg_->mutable_stream();
+ const size_t data_size = sizeof(int16_t) *
+ frame->samples_per_channel_ *
+ frame->num_channels_;
+ msg->set_output_data(frame->data_, data_size);
+ RETURN_ON_ERR(WriteMessageToDebugFile());
}
+#endif
- err = echo_cancellation_->ProcessCaptureAudio(capture_audio_);
- if (err != kNoError) {
- return err;
- }
+ return kNoError;
+}
- if (echo_control_mobile_->is_enabled() &&
- noise_suppression_->is_enabled()) {
- capture_audio_->CopyLowPassToReference();
- }
- err = noise_suppression_->ProcessCaptureAudio(capture_audio_);
- if (err != kNoError) {
- return err;
+int AudioProcessingImpl::ProcessStreamLocked() {
+#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
+ if (debug_file_->Open()) {
+ audioproc::Stream* msg = event_msg_->mutable_stream();
+ msg->set_delay(stream_delay_ms_);
+ msg->set_drift(echo_cancellation_->stream_drift_samples());
+ msg->set_level(gain_control_->stream_analog_level());
+ msg->set_keypress(key_pressed_);
}
+#endif
- err = echo_control_mobile_->ProcessCaptureAudio(capture_audio_);
- if (err != kNoError) {
- return err;
+ AudioBuffer* ca = capture_audio_.get(); // For brevity.
+ bool data_processed = is_data_processed();
+ if (analysis_needed(data_processed)) {
+ for (int i = 0; i < fwd_proc_format_.num_channels(); i++) {
+ // Split into a low and high band.
+ WebRtcSpl_AnalysisQMF(ca->data(i),
+ ca->samples_per_channel(),
+ ca->low_pass_split_data(i),
+ ca->high_pass_split_data(i),
+ ca->filter_states(i)->analysis_filter_state1,
+ ca->filter_states(i)->analysis_filter_state2);
+ }
}
- err = voice_detection_->ProcessCaptureAudio(capture_audio_);
- if (err != kNoError) {
- return err;
- }
+ RETURN_ON_ERR(high_pass_filter_->ProcessCaptureAudio(ca));
+ RETURN_ON_ERR(gain_control_->AnalyzeCaptureAudio(ca));
+ RETURN_ON_ERR(echo_cancellation_->ProcessCaptureAudio(ca));
- err = gain_control_->ProcessCaptureAudio(capture_audio_);
- if (err != kNoError) {
- return err;
+ if (echo_control_mobile_->is_enabled() && noise_suppression_->is_enabled()) {
+ ca->CopyLowPassToReference();
}
+ RETURN_ON_ERR(noise_suppression_->ProcessCaptureAudio(ca));
+ RETURN_ON_ERR(echo_control_mobile_->ProcessCaptureAudio(ca));
+ RETURN_ON_ERR(voice_detection_->ProcessCaptureAudio(ca));
+ RETURN_ON_ERR(gain_control_->ProcessCaptureAudio(ca));
if (synthesis_needed(data_processed)) {
- for (int i = 0; i < num_output_channels_; i++) {
+ for (int i = 0; i < fwd_proc_format_.num_channels(); i++) {
// Recombine low and high bands.
- SplittingFilterSynthesis(capture_audio_->low_pass_split_data(i),
- capture_audio_->high_pass_split_data(i),
- capture_audio_->data(i),
- capture_audio_->synthesis_filter_state1(i),
- capture_audio_->synthesis_filter_state2(i));
+ WebRtcSpl_SynthesisQMF(ca->low_pass_split_data(i),
+ ca->high_pass_split_data(i),
+ ca->samples_per_split_channel(),
+ ca->data(i),
+ ca->filter_states(i)->synthesis_filter_state1,
+ ca->filter_states(i)->synthesis_filter_state2);
}
}
// The level estimator operates on the recombined data.
- err = level_estimator_->ProcessStream(capture_audio_);
- if (err != kNoError) {
- return err;
+ RETURN_ON_ERR(level_estimator_->ProcessStream(ca));
+
+ was_stream_delay_set_ = false;
+ return kNoError;
+}
+
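// RETURN_ON_ERR is defined near the top of this file, outside this hunk. A
// minimal sketch of such a macro, assuming only the kNoError convention used
// above; it stands in for the repeated
// "err = ...; if (err != kNoError) return err;" pattern deleted in this hunk.
#define RETURN_ON_ERR(expr) \
  do {                      \
    int err = (expr);       \
    if (err != kNoError) {  \
      return err;           \
    }                       \
  } while (0)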
+int AudioProcessingImpl::AnalyzeReverseStream(const float* const* data,
+ int samples_per_channel,
+ int sample_rate_hz,
+ ChannelLayout layout) {
+ CriticalSectionScoped crit_scoped(crit_);
+ if (data == NULL) {
+ return kNullPointerError;
}
- capture_audio_->InterleaveTo(frame, interleave_needed(data_processed));
+ const int num_channels = ChannelsFromLayout(layout);
+ RETURN_ON_ERR(MaybeInitializeLocked(fwd_in_format_.rate(),
+ fwd_out_format_.rate(),
+ sample_rate_hz,
+ fwd_in_format_.num_channels(),
+ fwd_proc_format_.num_channels(),
+ num_channels));
+ if (samples_per_channel != rev_in_format_.samples_per_channel()) {
+ return kBadDataLengthError;
+ }
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
if (debug_file_->Open()) {
- audioproc::Stream* msg = event_msg_->mutable_stream();
- const size_t data_size = sizeof(int16_t) *
- frame->samples_per_channel_ *
- frame->num_channels_;
- msg->set_output_data(frame->data_, data_size);
- err = WriteMessageToDebugFile();
- if (err != kNoError) {
- return err;
- }
+ event_msg_->set_type(audioproc::Event::REVERSE_STREAM);
+ audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream();
+ const size_t channel_size = sizeof(float) * samples_per_channel;
+ for (int i = 0; i < num_channels; ++i)
+ msg->add_channel(data[i], channel_size);
+ RETURN_ON_ERR(WriteMessageToDebugFile());
}
#endif
- was_stream_delay_set_ = false;
- return kNoError;
+ render_audio_->CopyFrom(data, samples_per_channel, layout);
+ return AnalyzeReverseStreamLocked();
}
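// A calling sketch for the float interface above, with hypothetical buffers:
// data arrives deinterleaved, one pointer per channel, in 10 ms chunks.
static void FeedReverseStreamSketch(AudioProcessing* apm) {
  const int kRate = 32000;           // A native APM rate.
  const int kSamples = kRate / 100;  // 10 ms -> 320 samples per channel.
  float left[320] = {0.f};
  float right[320] = {0.f};
  const float* channels[] = {left, right};
  apm->AnalyzeReverseStream(channels, kSamples, kRate,
                            AudioProcessing::kStereo);
}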
int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
CriticalSectionScoped crit_scoped(crit_);
- int err = kNoError;
-
if (frame == NULL) {
return kNullPointerError;
}
-
- if (frame->sample_rate_hz_ != sample_rate_hz_) {
+ // Must be a native rate.
+ if (frame->sample_rate_hz_ != kSampleRate8kHz &&
+ frame->sample_rate_hz_ != kSampleRate16kHz &&
+ frame->sample_rate_hz_ != kSampleRate32kHz) {
return kBadSampleRateError;
}
-
- if (frame->num_channels_ != num_reverse_channels_) {
- return kBadNumberChannelsError;
+ // This interface does not tolerate different forward and reverse rates.
+ if (frame->sample_rate_hz_ != fwd_in_format_.rate()) {
+ return kBadSampleRateError;
}
- if (frame->samples_per_channel_ != samples_per_channel_) {
+ RETURN_ON_ERR(MaybeInitializeLocked(fwd_in_format_.rate(),
+ fwd_out_format_.rate(),
+ frame->sample_rate_hz_,
+ fwd_in_format_.num_channels(),
+ fwd_in_format_.num_channels(),
+ frame->num_channels_));
+ if (frame->samples_per_channel_ != rev_in_format_.samples_per_channel()) {
return kBadDataLengthError;
}
@@ -431,44 +582,33 @@ int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
frame->samples_per_channel_ *
frame->num_channels_;
msg->set_data(frame->data_, data_size);
- err = WriteMessageToDebugFile();
- if (err != kNoError) {
- return err;
- }
+ RETURN_ON_ERR(WriteMessageToDebugFile());
}
#endif
render_audio_->DeinterleaveFrom(frame);
+ return AnalyzeReverseStreamLocked();
+}
- // TODO(ajm): turn the splitting filter into a component?
- if (sample_rate_hz_ == kSampleRate32kHz) {
- for (int i = 0; i < num_reverse_channels_; i++) {
+int AudioProcessingImpl::AnalyzeReverseStreamLocked() {
+ AudioBuffer* ra = render_audio_.get(); // For brevity.
+ if (rev_proc_format_.rate() == kSampleRate32kHz) {
+ for (int i = 0; i < rev_proc_format_.num_channels(); i++) {
// Split into low and high band.
- SplittingFilterAnalysis(render_audio_->data(i),
- render_audio_->low_pass_split_data(i),
- render_audio_->high_pass_split_data(i),
- render_audio_->analysis_filter_state1(i),
- render_audio_->analysis_filter_state2(i));
+ WebRtcSpl_AnalysisQMF(ra->data(i),
+ ra->samples_per_channel(),
+ ra->low_pass_split_data(i),
+ ra->high_pass_split_data(i),
+ ra->filter_states(i)->analysis_filter_state1,
+ ra->filter_states(i)->analysis_filter_state2);
}
}
- // TODO(ajm): warnings possible from components?
- err = echo_cancellation_->ProcessRenderAudio(render_audio_);
- if (err != kNoError) {
- return err;
- }
-
- err = echo_control_mobile_->ProcessRenderAudio(render_audio_);
- if (err != kNoError) {
- return err;
- }
+ RETURN_ON_ERR(echo_cancellation_->ProcessRenderAudio(ra));
+ RETURN_ON_ERR(echo_control_mobile_->ProcessRenderAudio(ra));
+ RETURN_ON_ERR(gain_control_->ProcessRenderAudio(ra));
- err = gain_control_->ProcessRenderAudio(render_audio_);
- if (err != kNoError) {
- return err;
- }
-
- return err; // TODO(ajm): this is for returning warnings; necessary?
+ return kNoError;
}
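// A standalone sketch of the QMF band split used by both *Locked() functions,
// assuming the WebRtcSpl_AnalysisQMF/WebRtcSpl_SynthesisQMF signatures from
// signal_processing_library.h and six-element int32_t filter states: a 10 ms,
// 32 kHz chunk (320 samples) splits into 160-sample low and high bands, which
// are processed per band and then recombined.
static void QmfRoundTripSketch() {
  int16_t data[320] = {0};  // 10 ms at 32 kHz.
  int16_t low[160];         // 0-8 kHz band.
  int16_t high[160];        // 8-16 kHz band.
  int32_t a_state1[6] = {0};
  int32_t a_state2[6] = {0};
  int32_t s_state1[6] = {0};
  int32_t s_state2[6] = {0};
  WebRtcSpl_AnalysisQMF(data, 320, low, high, a_state1, a_state2);
  // Per-band processing (as in ProcessStreamLocked()) would run here.
  WebRtcSpl_SynthesisQMF(low, high, 160, data, s_state1, s_state2);
}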
int AudioProcessingImpl::set_stream_delay_ms(int delay) {
@@ -499,6 +639,14 @@ bool AudioProcessingImpl::was_stream_delay_set() const {
return was_stream_delay_set_;
}
+void AudioProcessingImpl::set_stream_key_pressed(bool key_pressed) {
+ key_pressed_ = key_pressed;
+}
+
+bool AudioProcessingImpl::stream_key_pressed() const {
+ return key_pressed_;
+}
+
void AudioProcessingImpl::set_delay_offset_ms(int offset) {
CriticalSectionScoped crit_scoped(crit_);
delay_offset_ms_ = offset;
@@ -613,13 +761,6 @@ VoiceDetection* AudioProcessingImpl::voice_detection() const {
return voice_detection_;
}
-int32_t AudioProcessingImpl::ChangeUniqueId(const int32_t id) {
- CriticalSectionScoped crit_scoped(crit_);
- id_ = id;
-
- return kNoError;
-}
-
bool AudioProcessingImpl::is_data_processed() const {
int enabled_count = 0;
std::list<ProcessingComponent*>::const_iterator it;
@@ -645,20 +786,21 @@ bool AudioProcessingImpl::is_data_processed() const {
return true;
}
-bool AudioProcessingImpl::interleave_needed(bool is_data_processed) const {
+bool AudioProcessingImpl::output_copy_needed(bool is_data_processed) const {
// Check if we've upmixed or downmixed the audio.
- return (num_output_channels_ != num_input_channels_ || is_data_processed);
+ return ((fwd_proc_format_.num_channels() != fwd_in_format_.num_channels()) ||
+ is_data_processed);
}
bool AudioProcessingImpl::synthesis_needed(bool is_data_processed) const {
- return (is_data_processed && sample_rate_hz_ == kSampleRate32kHz);
+ return (is_data_processed && fwd_proc_format_.rate() == kSampleRate32kHz);
}
bool AudioProcessingImpl::analysis_needed(bool is_data_processed) const {
if (!is_data_processed && !voice_detection_->is_enabled()) {
// Only level_estimator_ is enabled.
return false;
- } else if (sample_rate_hz_ == kSampleRate32kHz) {
+ } else if (fwd_proc_format_.rate() == kSampleRate32kHz) {
// Something besides level_estimator_ is enabled, and we have super-wb.
return true;
}
@@ -690,17 +832,18 @@ int AudioProcessingImpl::WriteMessageToDebugFile() {
event_msg_->Clear();
- return 0;
+ return kNoError;
}
int AudioProcessingImpl::WriteInitMessage() {
event_msg_->set_type(audioproc::Event::INIT);
audioproc::Init* msg = event_msg_->mutable_init();
- msg->set_sample_rate(sample_rate_hz_);
- msg->set_device_sample_rate(echo_cancellation_->device_sample_rate_hz());
- msg->set_num_input_channels(num_input_channels_);
- msg->set_num_output_channels(num_output_channels_);
- msg->set_num_reverse_channels(num_reverse_channels_);
+ msg->set_sample_rate(fwd_in_format_.rate());
+ msg->set_num_input_channels(fwd_in_format_.num_channels());
+ msg->set_num_output_channels(fwd_proc_format_.num_channels());
+ msg->set_num_reverse_channels(rev_in_format_.num_channels());
+ msg->set_reverse_sample_rate(rev_in_format_.rate());
+ msg->set_output_sample_rate(fwd_out_format_.rate());
int err = WriteMessageToDebugFile();
if (err != kNoError) {
@@ -710,4 +853,5 @@ int AudioProcessingImpl::WriteInitMessage() {
return kNoError;
}
#endif // WEBRTC_AUDIOPROC_DEBUG_DUMP
+
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_processing/audio_processing_impl.h b/chromium/third_party/webrtc/modules/audio_processing/audio_processing_impl.h
index e48a2c18a4f..d34f305a96b 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/audio_processing_impl.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/audio_processing_impl.h
@@ -19,9 +19,10 @@
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
namespace webrtc {
+
class AudioBuffer;
class CriticalSectionWrapper;
-class EchoCancellationImplWrapper;
+class EchoCancellationImpl;
class EchoControlMobileImpl;
class FileWrapper;
class GainControlImpl;
@@ -39,44 +40,92 @@ class Event;
} // namespace audioproc
#endif
-class AudioProcessingImpl : public AudioProcessing {
+class AudioRate {
public:
- enum {
- kSampleRate8kHz = 8000,
- kSampleRate16kHz = 16000,
- kSampleRate32kHz = 32000
- };
+ explicit AudioRate(int sample_rate_hz)
+ : rate_(sample_rate_hz),
+ samples_per_channel_(AudioProcessing::kChunkSizeMs * rate_ / 1000) {}
+ virtual ~AudioRate() {}
+
+ void set(int rate) {
+ rate_ = rate;
+ samples_per_channel_ = AudioProcessing::kChunkSizeMs * rate_ / 1000;
+ }
- explicit AudioProcessingImpl(int id);
- virtual ~AudioProcessingImpl();
+ int rate() const { return rate_; }
+ int samples_per_channel() const { return samples_per_channel_; }
- CriticalSectionWrapper* crit() const;
+ private:
+ int rate_;
+ int samples_per_channel_;
+};
- int split_sample_rate_hz() const;
- bool was_stream_delay_set() const;
+class AudioFormat : public AudioRate {
+ public:
+ AudioFormat(int sample_rate_hz, int num_channels)
+ : AudioRate(sample_rate_hz),
+ num_channels_(num_channels) {}
+ virtual ~AudioFormat() {}
+
+ void set(int rate, int num_channels) {
+ AudioRate::set(rate);
+ num_channels_ = num_channels;
+ }
+
+ int num_channels() const { return num_channels_; }
+
+ private:
+ int num_channels_;
+};
+
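// A usage sketch for the helpers above, with hypothetical values and assuming
// AudioProcessing::kChunkSizeMs == 10: AudioRate derives the 10 ms chunk size
// from the rate, and AudioFormat layers a channel count on top.
static inline void AudioFormatSketch() {
  AudioFormat fwd(32000, 2);
  int samples = fwd.samples_per_channel();  // 320 == 10 * 32000 / 1000.
  int channels = fwd.num_channels();        // 2.
  fwd.set(16000, 1);  // Now samples_per_channel() == 160.
}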
+class AudioProcessingImpl : public AudioProcessing {
+ public:
+ explicit AudioProcessingImpl(const Config& config);
+ virtual ~AudioProcessingImpl();
// AudioProcessing methods.
virtual int Initialize() OVERRIDE;
- virtual int InitializeLocked();
+ virtual int Initialize(int input_sample_rate_hz,
+ int output_sample_rate_hz,
+ int reverse_sample_rate_hz,
+ ChannelLayout input_layout,
+ ChannelLayout output_layout,
+ ChannelLayout reverse_layout) OVERRIDE;
virtual void SetExtraOptions(const Config& config) OVERRIDE;
virtual int EnableExperimentalNs(bool enable) OVERRIDE;
virtual bool experimental_ns_enabled() const OVERRIDE {
return false;
}
virtual int set_sample_rate_hz(int rate) OVERRIDE;
+ virtual int input_sample_rate_hz() const OVERRIDE;
virtual int sample_rate_hz() const OVERRIDE;
- virtual int set_num_channels(int input_channels,
- int output_channels) OVERRIDE;
+ virtual int proc_sample_rate_hz() const OVERRIDE;
+ virtual int proc_split_sample_rate_hz() const OVERRIDE;
virtual int num_input_channels() const OVERRIDE;
virtual int num_output_channels() const OVERRIDE;
- virtual int set_num_reverse_channels(int channels) OVERRIDE;
virtual int num_reverse_channels() const OVERRIDE;
+ virtual void set_output_will_be_muted(bool muted) OVERRIDE;
+ virtual bool output_will_be_muted() const OVERRIDE;
virtual int ProcessStream(AudioFrame* frame) OVERRIDE;
+ virtual int ProcessStream(const float* const* src,
+ int samples_per_channel,
+ int input_sample_rate_hz,
+ ChannelLayout input_layout,
+ int output_sample_rate_hz,
+ ChannelLayout output_layout,
+ float* const* dest) OVERRIDE;
virtual int AnalyzeReverseStream(AudioFrame* frame) OVERRIDE;
+ virtual int AnalyzeReverseStream(const float* const* data,
+ int samples_per_channel,
+ int sample_rate_hz,
+ ChannelLayout layout) OVERRIDE;
virtual int set_stream_delay_ms(int delay) OVERRIDE;
virtual int stream_delay_ms() const OVERRIDE;
+ virtual bool was_stream_delay_set() const OVERRIDE;
virtual void set_delay_offset_ms(int offset) OVERRIDE;
virtual int delay_offset_ms() const OVERRIDE;
+ virtual void set_stream_key_pressed(bool key_pressed) OVERRIDE;
+ virtual bool stream_key_pressed() const OVERRIDE;
virtual int StartDebugRecording(
const char filename[kMaxFilenameSize]) OVERRIDE;
virtual int StartDebugRecording(FILE* handle) OVERRIDE;
@@ -89,18 +138,32 @@ class AudioProcessingImpl : public AudioProcessing {
virtual NoiseSuppression* noise_suppression() const OVERRIDE;
virtual VoiceDetection* voice_detection() const OVERRIDE;
- // Module methods.
- virtual int32_t ChangeUniqueId(const int32_t id) OVERRIDE;
+ protected:
+ // Overridden in a mock.
+ virtual int InitializeLocked();
private:
+ int InitializeLocked(int input_sample_rate_hz,
+ int output_sample_rate_hz,
+ int reverse_sample_rate_hz,
+ int num_input_channels,
+ int num_output_channels,
+ int num_reverse_channels);
+ int MaybeInitializeLocked(int input_sample_rate_hz,
+ int output_sample_rate_hz,
+ int reverse_sample_rate_hz,
+ int num_input_channels,
+ int num_output_channels,
+ int num_reverse_channels);
+ int ProcessStreamLocked();
+ int AnalyzeReverseStreamLocked();
+
bool is_data_processed() const;
- bool interleave_needed(bool is_data_processed) const;
+ bool output_copy_needed(bool is_data_processed) const;
bool synthesis_needed(bool is_data_processed) const;
bool analysis_needed(bool is_data_processed) const;
- int id_;
-
- EchoCancellationImplWrapper* echo_cancellation_;
+ EchoCancellationImpl* echo_cancellation_;
EchoControlMobileImpl* echo_control_mobile_;
GainControlImpl* gain_control_;
HighPassFilterImpl* high_pass_filter_;
@@ -110,29 +173,34 @@ class AudioProcessingImpl : public AudioProcessing {
std::list<ProcessingComponent*> component_list_;
CriticalSectionWrapper* crit_;
- AudioBuffer* render_audio_;
- AudioBuffer* capture_audio_;
+ scoped_ptr<AudioBuffer> render_audio_;
+ scoped_ptr<AudioBuffer> capture_audio_;
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
// TODO(andrew): make this more graceful. Ideally we would split this stuff
// out into a separate class with an "enabled" and "disabled" implementation.
int WriteMessageToDebugFile();
int WriteInitMessage();
scoped_ptr<FileWrapper> debug_file_;
- scoped_ptr<audioproc::Event> event_msg_; // Protobuf message.
- std::string event_str_; // Memory for protobuf serialization.
+ scoped_ptr<audioproc::Event> event_msg_; // Protobuf message.
+ std::string event_str_; // Memory for protobuf serialization.
#endif
- int sample_rate_hz_;
- int split_sample_rate_hz_;
- int samples_per_channel_;
+ AudioFormat fwd_in_format_;
+ AudioFormat fwd_proc_format_;
+ AudioRate fwd_out_format_;
+ AudioFormat rev_in_format_;
+ AudioFormat rev_proc_format_;
+ int split_rate_;
+
int stream_delay_ms_;
int delay_offset_ms_;
bool was_stream_delay_set_;
- int num_reverse_channels_;
- int num_input_channels_;
- int num_output_channels_;
+ bool output_will_be_muted_;
+
+ bool key_pressed_;
};
+
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_AUDIO_PROCESSING_IMPL_H_
diff --git a/chromium/third_party/webrtc/modules/audio_processing/audio_processing_impl_unittest.cc b/chromium/third_party/webrtc/modules/audio_processing/audio_processing_impl_unittest.cc
new file mode 100644
index 00000000000..09576175756
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_processing/audio_processing_impl_unittest.cc
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_processing/audio_processing_impl.h"
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/config.h"
+#include "webrtc/modules/audio_processing/test/test_utils.h"
+#include "webrtc/modules/interface/module_common_types.h"
+
+using ::testing::Invoke;
+using ::testing::Return;
+
+namespace webrtc {
+
+class MockInitialize : public AudioProcessingImpl {
+ public:
+ explicit MockInitialize(const Config& config) : AudioProcessingImpl(config) {
+ }
+
+ MOCK_METHOD0(InitializeLocked, int());
+ int RealInitializeLocked() { return AudioProcessingImpl::InitializeLocked(); }
+};
+
+TEST(AudioProcessingImplTest, AudioParameterChangeTriggersInit) {
+ Config config;
+ MockInitialize mock(config);
+ ON_CALL(mock, InitializeLocked())
+ .WillByDefault(Invoke(&mock, &MockInitialize::RealInitializeLocked));
+
+ EXPECT_CALL(mock, InitializeLocked()).Times(1);
+ mock.Initialize();
+
+ AudioFrame frame;
+ // Call with the default parameters; there should be no init.
+ frame.num_channels_ = 1;
+ SetFrameSampleRate(&frame, 16000);
+ EXPECT_CALL(mock, InitializeLocked())
+ .Times(0);
+ EXPECT_NOERR(mock.ProcessStream(&frame));
+ EXPECT_NOERR(mock.AnalyzeReverseStream(&frame));
+
+ // New sample rate. (Only impacts ProcessStream).
+ SetFrameSampleRate(&frame, 32000);
+ EXPECT_CALL(mock, InitializeLocked())
+ .Times(1);
+ EXPECT_NOERR(mock.ProcessStream(&frame));
+
+ // New number of channels.
+ frame.num_channels_ = 2;
+ EXPECT_CALL(mock, InitializeLocked())
+ .Times(2);
+ EXPECT_NOERR(mock.ProcessStream(&frame));
+ // ProcessStream resets frame->num_channels_ to the number of output
+ // channels, so restore it before the reverse-stream call.
+ frame.num_channels_ = 2;
+ EXPECT_NOERR(mock.AnalyzeReverseStream(&frame));
+
+ // A new sample rate passed to AnalyzeReverseStream should be an error and
+ // not cause an init.
+ SetFrameSampleRate(&frame, 16000);
+ EXPECT_CALL(mock, InitializeLocked())
+ .Times(0);
+ EXPECT_EQ(mock.kBadSampleRateError, mock.AnalyzeReverseStream(&frame));
+}
+
+} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_processing/audio_processing_tests.gypi b/chromium/third_party/webrtc/modules/audio_processing/audio_processing_tests.gypi
index 05d7514bded..82aa7fd14fd 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/audio_processing_tests.gypi
+++ b/chromium/third_party/webrtc/modules/audio_processing/audio_processing_tests.gypi
@@ -7,25 +7,23 @@
# be found in the AUTHORS file in the root of the source tree.
{
- 'targets': [
- {
- 'target_name': 'audioproc_unittest_proto',
- 'type': 'static_library',
- 'sources': [ 'test/unittest.proto', ],
- 'variables': {
- 'proto_in_dir': 'test',
- # Workaround to protect against gyp's pathname relativization when this
- # file is included by modules.gyp.
- 'proto_out_protected': 'webrtc/audio_processing',
- 'proto_out_dir': '<(proto_out_protected)',
- },
- 'includes': [ '../../build/protoc.gypi', ],
- },
- ],
'conditions': [
['enable_protobuf==1', {
'targets': [
{
+ 'target_name': 'audioproc_unittest_proto',
+ 'type': 'static_library',
+ 'sources': [ 'test/unittest.proto', ],
+ 'variables': {
+ 'proto_in_dir': 'test',
+ # Workaround to protect against gyp's pathname relativization when
+ # this file is included by modules.gyp.
+ 'proto_out_protected': 'webrtc/audio_processing',
+ 'proto_out_dir': '<(proto_out_protected)',
+ },
+ 'includes': [ '../../build/protoc.gypi', ],
+ },
+ {
'target_name': 'audioproc',
'type': 'executable',
'dependencies': [
diff --git a/chromium/third_party/webrtc/modules/audio_processing/common.h b/chromium/third_party/webrtc/modules/audio_processing/common.h
new file mode 100644
index 00000000000..42454df299f
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_processing/common.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_COMMON_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_COMMON_H_
+
+#include <assert.h>
+#include <string.h>
+
+#include "webrtc/modules/audio_processing/include/audio_processing.h"
+#include "webrtc/system_wrappers/interface/scoped_ptr.h"
+
+namespace webrtc {
+
+static inline int ChannelsFromLayout(AudioProcessing::ChannelLayout layout) {
+ switch (layout) {
+ case AudioProcessing::kMono:
+ case AudioProcessing::kMonoAndKeyboard:
+ return 1;
+ case AudioProcessing::kStereo:
+ case AudioProcessing::kStereoAndKeyboard:
+ return 2;
+ }
+ assert(false);
+ return -1;
+}
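// For example, per the switch above, a keyboard channel is not counted:
// ChannelsFromLayout(AudioProcessing::kStereoAndKeyboard) returns 2.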
+
+// Helper to encapsulate a contiguous data buffer with access to a pointer
+// array of the deinterleaved channels.
+template <typename T>
+class ChannelBuffer {
+ public:
+ ChannelBuffer(int samples_per_channel, int num_channels)
+ : data_(new T[samples_per_channel * num_channels]),
+ channels_(new T*[num_channels]),
+ samples_per_channel_(samples_per_channel),
+ num_channels_(num_channels) {
+ memset(data_.get(), 0, sizeof(T) * samples_per_channel * num_channels);
+ for (int i = 0; i < num_channels; ++i)
+ channels_[i] = &data_[i * samples_per_channel];
+ }
+ ~ChannelBuffer() {}
+
+ void CopyFrom(const void* channel_ptr, int i) {
+ assert(i < num_channels_);
+ memcpy(channels_[i], channel_ptr, samples_per_channel_ * sizeof(T));
+ }
+
+ T* data() { return data_.get(); }
+ T* channel(int i) {
+ assert(i < num_channels_);
+ return channels_[i];
+ }
+ T** channels() { return channels_.get(); }
+
+ int samples_per_channel() { return samples_per_channel_; }
+ int num_channels() { return num_channels_; }
+ int length() { return samples_per_channel_ * num_channels_; }
+
+ private:
+ scoped_ptr<T[]> data_;
+ scoped_ptr<T*[]> channels_;
+ int samples_per_channel_;
+ int num_channels_;
+};
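// A usage sketch with hypothetical values: a stereo 10 ms buffer at 16 kHz,
// filled one channel at a time and read through the deinterleaved pointers.
static inline void ChannelBufferSketch() {
  ChannelBuffer<float> buf(160, 2);  // 160 samples per channel, 2 channels.
  float left[160] = {0.f};
  buf.CopyFrom(left, 0);             // Copy one channel's worth of samples.
  float** chans = buf.channels();    // chans[i] == buf.channel(i).
  int total = buf.length();          // 320 == samples_per_channel * channels.
}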
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_PROCESSING_COMMON_H_
diff --git a/chromium/third_party/webrtc/modules/audio_processing/debug.proto b/chromium/third_party/webrtc/modules/audio_processing/debug.proto
index 4b3a1638941..dce2f792093 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/debug.proto
+++ b/chromium/third_party/webrtc/modules/audio_processing/debug.proto
@@ -4,22 +4,39 @@ package webrtc.audioproc;
message Init {
optional int32 sample_rate = 1;
- optional int32 device_sample_rate = 2;
+ optional int32 device_sample_rate = 2 [deprecated=true];
optional int32 num_input_channels = 3;
optional int32 num_output_channels = 4;
optional int32 num_reverse_channels = 5;
+ optional int32 reverse_sample_rate = 6;
+ optional int32 output_sample_rate = 7;
}
+// May contain interleaved or deinterleaved data, but don't store both formats.
message ReverseStream {
+ // int16 interleaved data.
optional bytes data = 1;
+
+ // float deinterleaved data, where each repeated element holds the buffer
+ // of a single channel.
+ repeated bytes channel = 2;
}
+// May contain interleaved or deinterleaved data, but don't store both formats.
message Stream {
+ // int16 interleaved data.
optional bytes input_data = 1;
optional bytes output_data = 2;
+
optional int32 delay = 3;
optional sint32 drift = 4;
optional int32 level = 5;
+ optional bool keypress = 6;
+
+ // float deinterleaved data, where each repeated element holds the buffer
+ // of a single channel.
+ repeated bytes input_channel = 7;
+ repeated bytes output_channel = 8;
}
message Event {
diff --git a/chromium/third_party/webrtc/modules/audio_processing/echo_cancellation_impl.cc b/chromium/third_party/webrtc/modules/audio_processing/echo_cancellation_impl.cc
index cd12363ec5e..e770f9fe377 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/echo_cancellation_impl.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/echo_cancellation_impl.cc
@@ -18,7 +18,6 @@ extern "C" {
}
#include "webrtc/modules/audio_processing/aec/include/echo_cancellation.h"
#include "webrtc/modules/audio_processing/audio_buffer.h"
-#include "webrtc/modules/audio_processing/audio_processing_impl.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
namespace webrtc {
@@ -56,23 +55,20 @@ AudioProcessing::Error MapError(int err) {
}
} // namespace
-EchoCancellationImplWrapper* EchoCancellationImplWrapper::Create(
- const AudioProcessingImpl* audioproc) {
- return new EchoCancellationImpl(audioproc);
-}
-
-EchoCancellationImpl::EchoCancellationImpl(const AudioProcessingImpl* apm)
- : ProcessingComponent(apm),
+EchoCancellationImpl::EchoCancellationImpl(const AudioProcessing* apm,
+ CriticalSectionWrapper* crit)
+ : ProcessingComponent(),
apm_(apm),
+ crit_(crit),
drift_compensation_enabled_(false),
metrics_enabled_(false),
suppression_level_(kModerateSuppression),
- device_sample_rate_hz_(48000),
stream_drift_samples_(0),
was_stream_drift_set_(false),
stream_has_echo_(false),
delay_logging_enabled_(false),
- delay_correction_enabled_(false) {}
+ delay_correction_enabled_(false),
+ reported_delay_enabled_(true) {}
EchoCancellationImpl::~EchoCancellationImpl() {}
@@ -133,10 +129,10 @@ int EchoCancellationImpl::ProcessCaptureAudio(AudioBuffer* audio) {
Handle* my_handle = handle(handle_index);
err = WebRtcAec_Process(
my_handle,
- audio->low_pass_split_data(i),
- audio->high_pass_split_data(i),
- audio->low_pass_split_data(i),
- audio->high_pass_split_data(i),
+ audio->low_pass_split_data_f(i),
+ audio->high_pass_split_data_f(i),
+ audio->low_pass_split_data_f(i),
+ audio->high_pass_split_data_f(i),
static_cast<int16_t>(audio->samples_per_split_channel()),
apm_->stream_delay_ms(),
stream_drift_samples_);
@@ -168,7 +164,7 @@ int EchoCancellationImpl::ProcessCaptureAudio(AudioBuffer* audio) {
}
int EchoCancellationImpl::Enable(bool enable) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
// Ensure AEC and AECM are not both enabled.
if (enable && apm_->echo_control_mobile()->is_enabled()) {
return apm_->kBadParameterError;
@@ -182,7 +178,7 @@ bool EchoCancellationImpl::is_enabled() const {
}
int EchoCancellationImpl::set_suppression_level(SuppressionLevel level) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
if (MapSetting(level) == -1) {
return apm_->kBadParameterError;
}
@@ -197,7 +193,7 @@ EchoCancellation::SuppressionLevel EchoCancellationImpl::suppression_level()
}
int EchoCancellationImpl::enable_drift_compensation(bool enable) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
drift_compensation_enabled_ = enable;
return Configure();
}
@@ -206,20 +202,6 @@ bool EchoCancellationImpl::is_drift_compensation_enabled() const {
return drift_compensation_enabled_;
}
-int EchoCancellationImpl::set_device_sample_rate_hz(int rate) {
- CriticalSectionScoped crit_scoped(apm_->crit());
- if (rate < 8000 || rate > 96000) {
- return apm_->kBadParameterError;
- }
-
- device_sample_rate_hz_ = rate;
- return Initialize();
-}
-
-int EchoCancellationImpl::device_sample_rate_hz() const {
- return device_sample_rate_hz_;
-}
-
void EchoCancellationImpl::set_stream_drift_samples(int drift) {
was_stream_drift_set_ = true;
stream_drift_samples_ = drift;
@@ -230,7 +212,7 @@ int EchoCancellationImpl::stream_drift_samples() const {
}
int EchoCancellationImpl::enable_metrics(bool enable) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
metrics_enabled_ = enable;
return Configure();
}
@@ -242,7 +224,7 @@ bool EchoCancellationImpl::are_metrics_enabled() const {
// TODO(ajm): we currently just use the metrics from the first AEC. Think more
// about the best way to extend this to multi-channel.
int EchoCancellationImpl::GetMetrics(Metrics* metrics) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
if (metrics == NULL) {
return apm_->kNullPointerError;
}
@@ -289,7 +271,7 @@ bool EchoCancellationImpl::stream_has_echo() const {
}
int EchoCancellationImpl::enable_delay_logging(bool enable) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
delay_logging_enabled_ = enable;
return Configure();
}
@@ -300,7 +282,7 @@ bool EchoCancellationImpl::is_delay_logging_enabled() const {
// TODO(bjornv): How should we handle the multi-channel case?
int EchoCancellationImpl::GetDelayMetrics(int* median, int* std) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
if (median == NULL) {
return apm_->kNullPointerError;
}
@@ -322,7 +304,7 @@ int EchoCancellationImpl::GetDelayMetrics(int* median, int* std) {
}
struct AecCore* EchoCancellationImpl::aec_core() const {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
if (!is_component_enabled()) {
return NULL;
}
@@ -336,13 +318,12 @@ int EchoCancellationImpl::Initialize() {
return err;
}
- was_stream_drift_set_ = false;
-
return apm_->kNoError;
}
void EchoCancellationImpl::SetExtraOptions(const Config& config) {
delay_correction_enabled_ = config.Get<DelayCorrection>().enabled;
+ reported_delay_enabled_ = config.Get<ReportedDelay>().enabled;
Configure();
}
@@ -357,16 +338,19 @@ void* EchoCancellationImpl::CreateHandle() const {
return handle;
}
-int EchoCancellationImpl::DestroyHandle(void* handle) const {
+void EchoCancellationImpl::DestroyHandle(void* handle) const {
assert(handle != NULL);
- return WebRtcAec_Free(static_cast<Handle*>(handle));
+ WebRtcAec_Free(static_cast<Handle*>(handle));
}
int EchoCancellationImpl::InitializeHandle(void* handle) const {
assert(handle != NULL);
+ // TODO(ajm): Drift compensation is disabled in practice. If restored, it
+ // should be managed internally and not depend on the hardware sample rate.
+ // For now, just hardcode a 48 kHz value.
return WebRtcAec_Init(static_cast<Handle*>(handle),
- apm_->sample_rate_hz(),
- device_sample_rate_hz_);
+ apm_->proc_sample_rate_hz(),
+ 48000);
}
int EchoCancellationImpl::ConfigureHandle(void* handle) const {
@@ -379,6 +363,8 @@ int EchoCancellationImpl::ConfigureHandle(void* handle) const {
WebRtcAec_enable_delay_correction(WebRtcAec_aec_core(
static_cast<Handle*>(handle)), delay_correction_enabled_ ? 1 : 0);
+ WebRtcAec_enable_reported_delay(WebRtcAec_aec_core(
+ static_cast<Handle*>(handle)), reported_delay_enabled_ ? 1 : 0);
return WebRtcAec_set_config(static_cast<Handle*>(handle), config);
}
diff --git a/chromium/third_party/webrtc/modules/audio_processing/echo_cancellation_impl.h b/chromium/third_party/webrtc/modules/audio_processing/echo_cancellation_impl.h
index 3ab0ce26689..b9c116a0650 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/echo_cancellation_impl.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/echo_cancellation_impl.h
@@ -11,25 +11,26 @@
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_ECHO_CANCELLATION_IMPL_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_ECHO_CANCELLATION_IMPL_H_
-#include "webrtc/modules/audio_processing/echo_cancellation_impl_wrapper.h"
+#include "webrtc/modules/audio_processing/include/audio_processing.h"
+#include "webrtc/modules/audio_processing/processing_component.h"
namespace webrtc {
-class AudioProcessingImpl;
class AudioBuffer;
+class CriticalSectionWrapper;
-class EchoCancellationImpl : public EchoCancellationImplWrapper {
+class EchoCancellationImpl : public EchoCancellation,
+ public ProcessingComponent {
public:
- explicit EchoCancellationImpl(const AudioProcessingImpl* apm);
+ EchoCancellationImpl(const AudioProcessing* apm,
+ CriticalSectionWrapper* crit);
virtual ~EchoCancellationImpl();
- // EchoCancellationImplWrapper implementation.
- virtual int ProcessRenderAudio(const AudioBuffer* audio) OVERRIDE;
- virtual int ProcessCaptureAudio(AudioBuffer* audio) OVERRIDE;
+ int ProcessRenderAudio(const AudioBuffer* audio);
+ int ProcessCaptureAudio(AudioBuffer* audio);
// EchoCancellation implementation.
virtual bool is_enabled() const OVERRIDE;
- virtual int device_sample_rate_hz() const OVERRIDE;
virtual int stream_drift_samples() const OVERRIDE;
// ProcessingComponent implementation.
@@ -41,7 +42,6 @@ class EchoCancellationImpl : public EchoCancellationImplWrapper {
virtual int Enable(bool enable) OVERRIDE;
virtual int enable_drift_compensation(bool enable) OVERRIDE;
virtual bool is_drift_compensation_enabled() const OVERRIDE;
- virtual int set_device_sample_rate_hz(int rate) OVERRIDE;
virtual void set_stream_drift_samples(int drift) OVERRIDE;
virtual int set_suppression_level(SuppressionLevel level) OVERRIDE;
virtual SuppressionLevel suppression_level() const OVERRIDE;
@@ -58,20 +58,21 @@ class EchoCancellationImpl : public EchoCancellationImplWrapper {
virtual void* CreateHandle() const OVERRIDE;
virtual int InitializeHandle(void* handle) const OVERRIDE;
virtual int ConfigureHandle(void* handle) const OVERRIDE;
- virtual int DestroyHandle(void* handle) const OVERRIDE;
+ virtual void DestroyHandle(void* handle) const OVERRIDE;
virtual int num_handles_required() const OVERRIDE;
virtual int GetHandleError(void* handle) const OVERRIDE;
- const AudioProcessingImpl* apm_;
+ const AudioProcessing* apm_;
+ CriticalSectionWrapper* crit_;
bool drift_compensation_enabled_;
bool metrics_enabled_;
SuppressionLevel suppression_level_;
- int device_sample_rate_hz_;
int stream_drift_samples_;
bool was_stream_drift_set_;
bool stream_has_echo_;
bool delay_logging_enabled_;
bool delay_correction_enabled_;
+ bool reported_delay_enabled_;
};
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_processing/echo_cancellation_impl_unittest.cc b/chromium/third_party/webrtc/modules/audio_processing/echo_cancellation_impl_unittest.cc
index f9bc3213ff1..49bcf9459b0 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/echo_cancellation_impl_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/echo_cancellation_impl_unittest.cc
@@ -14,6 +14,7 @@ extern "C" {
}
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
+#include "webrtc/test/testsupport/gtest_disable.h"
namespace webrtc {
@@ -47,4 +48,34 @@ TEST(EchoCancellationInternalTest, DelayCorrection) {
EXPECT_EQ(0, WebRtcAec_delay_correction_enabled(aec_core));
}
+TEST(EchoCancellationInternalTest, ReportedDelay) {
+ scoped_ptr<AudioProcessing> ap(AudioProcessing::Create(0));
+ EXPECT_TRUE(ap->echo_cancellation()->aec_core() == NULL);
+
+ EXPECT_EQ(ap->kNoError, ap->echo_cancellation()->Enable(true));
+ EXPECT_TRUE(ap->echo_cancellation()->is_enabled());
+
+ AecCore* aec_core = ap->echo_cancellation()->aec_core();
+ ASSERT_TRUE(aec_core != NULL);
+ // Enabled by default.
+ EXPECT_EQ(1, WebRtcAec_reported_delay_enabled(aec_core));
+
+ Config config;
+ config.Set<ReportedDelay>(new ReportedDelay(false));
+ ap->SetExtraOptions(config);
+ EXPECT_EQ(0, WebRtcAec_reported_delay_enabled(aec_core));
+
+ // Retains setting after initialization.
+ EXPECT_EQ(ap->kNoError, ap->Initialize());
+ EXPECT_EQ(0, WebRtcAec_reported_delay_enabled(aec_core));
+
+ config.Set<ReportedDelay>(new ReportedDelay(true));
+ ap->SetExtraOptions(config);
+ EXPECT_EQ(1, WebRtcAec_reported_delay_enabled(aec_core));
+
+ // Retains setting after initialization.
+ EXPECT_EQ(ap->kNoError, ap->Initialize());
+ EXPECT_EQ(1, WebRtcAec_reported_delay_enabled(aec_core));
+}
+
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_processing/echo_cancellation_impl_wrapper.h b/chromium/third_party/webrtc/modules/audio_processing/echo_cancellation_impl_wrapper.h
deleted file mode 100644
index f1c03f32d6f..00000000000
--- a/chromium/third_party/webrtc/modules/audio_processing/echo_cancellation_impl_wrapper.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_ECHO_CANCELLATION_IMPL_WRAPPER_H_
-#define WEBRTC_MODULES_AUDIO_PROCESSING_ECHO_CANCELLATION_IMPL_WRAPPER_H_
-
-#include "webrtc/modules/audio_processing/include/audio_processing.h"
-#include "webrtc/modules/audio_processing/processing_component.h"
-
-namespace webrtc {
-
-class AudioProcessingImpl;
-class AudioBuffer;
-
-class EchoCancellationImplWrapper : public virtual EchoCancellation,
- public virtual ProcessingComponent {
- public:
- static EchoCancellationImplWrapper* Create(
- const AudioProcessingImpl* audioproc);
- virtual ~EchoCancellationImplWrapper() {}
-
- virtual int ProcessRenderAudio(const AudioBuffer* audio) = 0;
- virtual int ProcessCaptureAudio(AudioBuffer* audio) = 0;
-};
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_AUDIO_PROCESSING_ECHO_CANCELLATION_IMPL_WRAPPER_H_
diff --git a/chromium/third_party/webrtc/modules/audio_processing/echo_control_mobile_impl.cc b/chromium/third_party/webrtc/modules/audio_processing/echo_control_mobile_impl.cc
index f7853814966..a03adc5300e 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/echo_control_mobile_impl.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/echo_control_mobile_impl.cc
@@ -15,7 +15,6 @@
#include "webrtc/modules/audio_processing/aecm/include/echo_control_mobile.h"
#include "webrtc/modules/audio_processing/audio_buffer.h"
-#include "webrtc/modules/audio_processing/audio_processing_impl.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/logging.h"
@@ -63,9 +62,11 @@ size_t EchoControlMobile::echo_path_size_bytes() {
return WebRtcAecm_echo_path_size_bytes();
}
-EchoControlMobileImpl::EchoControlMobileImpl(const AudioProcessingImpl* apm)
- : ProcessingComponent(apm),
+EchoControlMobileImpl::EchoControlMobileImpl(const AudioProcessing* apm,
+ CriticalSectionWrapper* crit)
+ : ProcessingComponent(),
apm_(apm),
+ crit_(crit),
routing_mode_(kSpeakerphone),
comfort_noise_enabled_(true),
external_echo_path_(NULL) {}
@@ -127,7 +128,7 @@ int EchoControlMobileImpl::ProcessCaptureAudio(AudioBuffer* audio) {
for (int i = 0; i < audio->num_channels(); i++) {
// TODO(ajm): improve how this works, possibly inside AECM.
// This is kind of hacked up.
- int16_t* noisy = audio->low_pass_reference(i);
+ const int16_t* noisy = audio->low_pass_reference(i);
int16_t* clean = audio->low_pass_split_data(i);
if (noisy == NULL) {
noisy = clean;
@@ -155,7 +156,7 @@ int EchoControlMobileImpl::ProcessCaptureAudio(AudioBuffer* audio) {
}
int EchoControlMobileImpl::Enable(bool enable) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
// Ensure AEC and AECM are not both enabled.
if (enable && apm_->echo_cancellation()->is_enabled()) {
return apm_->kBadParameterError;
@@ -169,7 +170,7 @@ bool EchoControlMobileImpl::is_enabled() const {
}
int EchoControlMobileImpl::set_routing_mode(RoutingMode mode) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
if (MapSetting(mode) == -1) {
return apm_->kBadParameterError;
}
@@ -184,7 +185,7 @@ EchoControlMobile::RoutingMode EchoControlMobileImpl::routing_mode()
}
int EchoControlMobileImpl::enable_comfort_noise(bool enable) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
comfort_noise_enabled_ = enable;
return Configure();
}
@@ -195,7 +196,7 @@ bool EchoControlMobileImpl::is_comfort_noise_enabled() const {
int EchoControlMobileImpl::SetEchoPath(const void* echo_path,
size_t size_bytes) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
if (echo_path == NULL) {
return apm_->kNullPointerError;
}
@@ -214,7 +215,7 @@ int EchoControlMobileImpl::SetEchoPath(const void* echo_path,
int EchoControlMobileImpl::GetEchoPath(void* echo_path,
size_t size_bytes) const {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
if (echo_path == NULL) {
return apm_->kNullPointerError;
}
@@ -240,7 +241,7 @@ int EchoControlMobileImpl::Initialize() {
return apm_->kNoError;
}
- if (apm_->sample_rate_hz() == apm_->kSampleRate32kHz) {
+ if (apm_->proc_sample_rate_hz() > apm_->kSampleRate16kHz) {
LOG(LS_ERROR) << "AECM only supports 16 kHz or lower sample rates";
return apm_->kBadSampleRateError;
}
@@ -259,14 +260,14 @@ void* EchoControlMobileImpl::CreateHandle() const {
return handle;
}
-int EchoControlMobileImpl::DestroyHandle(void* handle) const {
- return WebRtcAecm_Free(static_cast<Handle*>(handle));
+void EchoControlMobileImpl::DestroyHandle(void* handle) const {
+ WebRtcAecm_Free(static_cast<Handle*>(handle));
}
int EchoControlMobileImpl::InitializeHandle(void* handle) const {
assert(handle != NULL);
Handle* my_handle = static_cast<Handle*>(handle);
- if (WebRtcAecm_Init(my_handle, apm_->sample_rate_hz()) != 0) {
+ if (WebRtcAecm_Init(my_handle, apm_->proc_sample_rate_hz()) != 0) {
return GetHandleError(my_handle);
}
if (external_echo_path_ != NULL) {
diff --git a/chromium/third_party/webrtc/modules/audio_processing/echo_control_mobile_impl.h b/chromium/third_party/webrtc/modules/audio_processing/echo_control_mobile_impl.h
index 5eefab0a3c2..4f5b5931a1c 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/echo_control_mobile_impl.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/echo_control_mobile_impl.h
@@ -15,13 +15,15 @@
#include "webrtc/modules/audio_processing/processing_component.h"
namespace webrtc {
-class AudioProcessingImpl;
+
class AudioBuffer;
+class CriticalSectionWrapper;
class EchoControlMobileImpl : public EchoControlMobile,
public ProcessingComponent {
public:
- explicit EchoControlMobileImpl(const AudioProcessingImpl* apm);
+ EchoControlMobileImpl(const AudioProcessing* apm,
+ CriticalSectionWrapper* crit);
virtual ~EchoControlMobileImpl();
int ProcessRenderAudio(const AudioBuffer* audio);
@@ -47,11 +49,12 @@ class EchoControlMobileImpl : public EchoControlMobile,
virtual void* CreateHandle() const OVERRIDE;
virtual int InitializeHandle(void* handle) const OVERRIDE;
virtual int ConfigureHandle(void* handle) const OVERRIDE;
- virtual int DestroyHandle(void* handle) const OVERRIDE;
+ virtual void DestroyHandle(void* handle) const OVERRIDE;
virtual int num_handles_required() const OVERRIDE;
virtual int GetHandleError(void* handle) const OVERRIDE;
- const AudioProcessingImpl* apm_;
+ const AudioProcessing* apm_;
+ CriticalSectionWrapper* crit_;
RoutingMode routing_mode_;
bool comfort_noise_enabled_;
unsigned char* external_echo_path_;
diff --git a/chromium/third_party/webrtc/modules/audio_processing/gain_control_impl.cc b/chromium/third_party/webrtc/modules/audio_processing/gain_control_impl.cc
index 35547031e30..a67b67ecb16 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/gain_control_impl.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/gain_control_impl.cc
@@ -12,12 +12,10 @@
#include <assert.h>
+#include "webrtc/modules/audio_processing/audio_buffer.h"
#include "webrtc/modules/audio_processing/agc/include/gain_control.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
-#include "webrtc/modules/audio_processing/audio_buffer.h"
-#include "webrtc/modules/audio_processing/audio_processing_impl.h"
-
namespace webrtc {
typedef void Handle;
@@ -37,9 +35,11 @@ int16_t MapSetting(GainControl::Mode mode) {
}
} // namespace
-GainControlImpl::GainControlImpl(const AudioProcessingImpl* apm)
- : ProcessingComponent(apm),
+GainControlImpl::GainControlImpl(const AudioProcessing* apm,
+ CriticalSectionWrapper* crit)
+ : ProcessingComponent(),
apm_(apm),
+ crit_(crit),
mode_(kAdaptiveAnalog),
minimum_capture_level_(0),
maximum_capture_level_(255),
@@ -59,7 +59,7 @@ int GainControlImpl::ProcessRenderAudio(AudioBuffer* audio) {
assert(audio->samples_per_split_channel() <= 160);
- int16_t* mixed_data = audio->low_pass_split_data(0);
+ const int16_t* mixed_data = audio->low_pass_split_data(0);
if (audio->num_channels() > 1) {
audio->CopyAndMixLowPass(1);
mixed_data = audio->mixed_low_pass_data(0);
@@ -91,6 +91,7 @@ int GainControlImpl::AnalyzeCaptureAudio(AudioBuffer* audio) {
int err = apm_->kNoError;
if (mode_ == kAdaptiveAnalog) {
+ capture_levels_.assign(num_handles(), analog_capture_level_);
for (int i = 0; i < num_handles(); i++) {
Handle* my_handle = static_cast<Handle*>(handle(i));
err = WebRtcAgc_AddMic(
@@ -114,7 +115,6 @@ int GainControlImpl::AnalyzeCaptureAudio(AudioBuffer* audio) {
audio->low_pass_split_data(i),
audio->high_pass_split_data(i),
static_cast<int16_t>(audio->samples_per_split_channel()),
- //capture_levels_[i],
analog_capture_level_,
&capture_level_out);
@@ -190,13 +190,6 @@ int GainControlImpl::set_stream_analog_level(int level) {
if (level < minimum_capture_level_ || level > maximum_capture_level_) {
return apm_->kBadParameterError;
}
-
- if (mode_ == kAdaptiveAnalog) {
- if (level != analog_capture_level_) {
- // The analog level has been changed; update our internal levels.
- capture_levels_.assign(num_handles(), level);
- }
- }
analog_capture_level_ = level;
return apm_->kNoError;
@@ -210,7 +203,7 @@ int GainControlImpl::stream_analog_level() {
}
int GainControlImpl::Enable(bool enable) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
return EnableComponent(enable);
}
@@ -219,7 +212,7 @@ bool GainControlImpl::is_enabled() const {
}
int GainControlImpl::set_mode(Mode mode) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
if (MapSetting(mode) == -1) {
return apm_->kBadParameterError;
}
@@ -234,7 +227,7 @@ GainControl::Mode GainControlImpl::mode() const {
int GainControlImpl::set_analog_level_limits(int minimum,
int maximum) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
if (minimum < 0) {
return apm_->kBadParameterError;
}
@@ -266,7 +259,7 @@ bool GainControlImpl::stream_is_saturated() const {
}
int GainControlImpl::set_target_level_dbfs(int level) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
if (level > 31 || level < 0) {
return apm_->kBadParameterError;
}
@@ -280,7 +273,7 @@ int GainControlImpl::target_level_dbfs() const {
}
int GainControlImpl::set_compression_gain_db(int gain) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
if (gain < 0 || gain > 90) {
return apm_->kBadParameterError;
}
@@ -294,7 +287,7 @@ int GainControlImpl::compression_gain_db() const {
}
int GainControlImpl::enable_limiter(bool enable) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
limiter_enabled_ = enable;
return Configure();
}
@@ -309,11 +302,7 @@ int GainControlImpl::Initialize() {
return err;
}
- analog_capture_level_ =
- (maximum_capture_level_ - minimum_capture_level_) >> 1;
capture_levels_.assign(num_handles(), analog_capture_level_);
- was_analog_level_set_ = false;
-
return apm_->kNoError;
}
@@ -328,8 +317,8 @@ void* GainControlImpl::CreateHandle() const {
return handle;
}
-int GainControlImpl::DestroyHandle(void* handle) const {
- return WebRtcAgc_Free(static_cast<Handle*>(handle));
+void GainControlImpl::DestroyHandle(void* handle) const {
+ WebRtcAgc_Free(static_cast<Handle*>(handle));
}
int GainControlImpl::InitializeHandle(void* handle) const {
@@ -337,7 +326,7 @@ int GainControlImpl::InitializeHandle(void* handle) const {
minimum_capture_level_,
maximum_capture_level_,
MapSetting(mode_),
- apm_->sample_rate_hz());
+ apm_->proc_sample_rate_hz());
}
int GainControlImpl::ConfigureHandle(void* handle) const {
diff --git a/chromium/third_party/webrtc/modules/audio_processing/gain_control_impl.h b/chromium/third_party/webrtc/modules/audio_processing/gain_control_impl.h
index 2de02f6e6b7..81159870009 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/gain_control_impl.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/gain_control_impl.h
@@ -17,13 +17,15 @@
#include "webrtc/modules/audio_processing/processing_component.h"
namespace webrtc {
-class AudioProcessingImpl;
+
class AudioBuffer;
+class CriticalSectionWrapper;
class GainControlImpl : public GainControl,
public ProcessingComponent {
public:
- explicit GainControlImpl(const AudioProcessingImpl* apm);
+ GainControlImpl(const AudioProcessing* apm,
+ CriticalSectionWrapper* crit);
virtual ~GainControlImpl();
int ProcessRenderAudio(AudioBuffer* audio);
@@ -58,11 +60,12 @@ class GainControlImpl : public GainControl,
virtual void* CreateHandle() const OVERRIDE;
virtual int InitializeHandle(void* handle) const OVERRIDE;
virtual int ConfigureHandle(void* handle) const OVERRIDE;
- virtual int DestroyHandle(void* handle) const OVERRIDE;
+ virtual void DestroyHandle(void* handle) const OVERRIDE;
virtual int num_handles_required() const OVERRIDE;
virtual int GetHandleError(void* handle) const OVERRIDE;
- const AudioProcessingImpl* apm_;
+ const AudioProcessing* apm_;
+ CriticalSectionWrapper* crit_;
Mode mode_;
int minimum_capture_level_;
int maximum_capture_level_;
diff --git a/chromium/third_party/webrtc/modules/audio_processing/gen_core_neon_offsets.gyp b/chromium/third_party/webrtc/modules/audio_processing/gen_core_neon_offsets.gyp
new file mode 100644
index 00000000000..55c79689f7e
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_processing/gen_core_neon_offsets.gyp
@@ -0,0 +1,45 @@
+# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+{
+ 'includes': ['lib_core_neon_offsets.gypi'],
+ 'targets' : [
+ {
+ 'target_name': 'gen_nsx_core_neon_offsets_h',
+ 'type': 'none',
+ 'dependencies': [
+ 'lib_core_neon_offsets',
+ '<(DEPTH)/third_party/libvpx/libvpx.gyp:libvpx_obj_int_extract#host',
+ ],
+ 'sources': ['<(shared_generated_dir)/nsx_core_neon_offsets.o',],
+ 'variables' : {
+ 'unpack_lib_name':'nsx_core_neon_offsets.o',
+ },
+ 'includes': [
+ '../../../third_party/libvpx/unpack_lib_posix.gypi',
+ '../../../third_party/libvpx/obj_int_extract.gypi',
+ ],
+ },
+ {
+ 'target_name': 'gen_aecm_core_neon_offsets_h',
+ 'type': 'none',
+ 'dependencies': [
+ 'lib_core_neon_offsets',
+ '<(DEPTH)/third_party/libvpx/libvpx.gyp:libvpx_obj_int_extract#host',
+ ],
+ 'variables': {
+ 'unpack_lib_name':'aecm_core_neon_offsets.o',
+ },
+ 'sources': ['<(shared_generated_dir)/aecm_core_neon_offsets.o',],
+ 'includes': [
+ '../../../third_party/libvpx/unpack_lib_posix.gypi',
+ '../../../third_party/libvpx/obj_int_extract.gypi',
+ ],
+ },
+ ],
+}
diff --git a/chromium/third_party/webrtc/modules/audio_processing/gen_core_neon_offsets_chromium.gyp b/chromium/third_party/webrtc/modules/audio_processing/gen_core_neon_offsets_chromium.gyp
new file mode 100644
index 00000000000..f4a9134fb21
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_processing/gen_core_neon_offsets_chromium.gyp
@@ -0,0 +1,45 @@
+# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+{
+ 'includes': ['lib_core_neon_offsets.gypi'],
+ 'targets' : [
+ {
+ 'target_name': 'gen_nsx_core_neon_offsets_h',
+ 'type': 'none',
+ 'dependencies': [
+ 'lib_core_neon_offsets',
+ '<(DEPTH)/third_party/libvpx/libvpx.gyp:libvpx_obj_int_extract#host',
+ ],
+ 'sources': ['<(shared_generated_dir)/nsx_core_neon_offsets.o',],
+ 'variables' : {
+ 'unpack_lib_name':'nsx_core_neon_offsets.o',
+ },
+ 'includes': [
+ '../../../../third_party/libvpx/unpack_lib_posix.gypi',
+ '../../../../third_party/libvpx/obj_int_extract.gypi',
+ ],
+ },
+ {
+ 'target_name': 'gen_aecm_core_neon_offsets_h',
+ 'type': 'none',
+ 'dependencies': [
+ 'lib_core_neon_offsets',
+ '<(DEPTH)/third_party/libvpx/libvpx.gyp:libvpx_obj_int_extract#host',
+ ],
+ 'variables': {
+ 'unpack_lib_name':'aecm_core_neon_offsets.o',
+ },
+ 'sources': ['<(shared_generated_dir)/aecm_core_neon_offsets.o',],
+ 'includes': [
+ '../../../../third_party/libvpx/unpack_lib_posix.gypi',
+ '../../../../third_party/libvpx/obj_int_extract.gypi',
+ ],
+ },
+ ],
+}
diff --git a/chromium/third_party/webrtc/modules/audio_processing/high_pass_filter_impl.cc b/chromium/third_party/webrtc/modules/audio_processing/high_pass_filter_impl.cc
index da2170373b0..0a23ff23555 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/high_pass_filter_impl.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/high_pass_filter_impl.cc
@@ -13,11 +13,10 @@
#include <assert.h>
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
+#include "webrtc/modules/audio_processing/audio_buffer.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/typedefs.h"
-#include "webrtc/modules/audio_processing/audio_buffer.h"
-#include "webrtc/modules/audio_processing/audio_processing_impl.h"
namespace webrtc {
namespace {
@@ -36,7 +35,7 @@ struct FilterState {
int InitializeFilter(FilterState* hpf, int sample_rate_hz) {
assert(hpf != NULL);
- if (sample_rate_hz == AudioProcessingImpl::kSampleRate8kHz) {
+ if (sample_rate_hz == AudioProcessing::kSampleRate8kHz) {
hpf->ba = kFilterCoefficients8kHz;
} else {
hpf->ba = kFilterCoefficients;
@@ -105,9 +104,11 @@ int Filter(FilterState* hpf, int16_t* data, int length) {
typedef FilterState Handle;
-HighPassFilterImpl::HighPassFilterImpl(const AudioProcessingImpl* apm)
- : ProcessingComponent(apm),
- apm_(apm) {}
+HighPassFilterImpl::HighPassFilterImpl(const AudioProcessing* apm,
+ CriticalSectionWrapper* crit)
+ : ProcessingComponent(),
+ apm_(apm),
+ crit_(crit) {}
HighPassFilterImpl::~HighPassFilterImpl() {}
@@ -135,7 +136,7 @@ int HighPassFilterImpl::ProcessCaptureAudio(AudioBuffer* audio) {
}
int HighPassFilterImpl::Enable(bool enable) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
return EnableComponent(enable);
}
@@ -147,14 +148,13 @@ void* HighPassFilterImpl::CreateHandle() const {
return new FilterState;
}
-int HighPassFilterImpl::DestroyHandle(void* handle) const {
+void HighPassFilterImpl::DestroyHandle(void* handle) const {
delete static_cast<Handle*>(handle);
- return apm_->kNoError;
}
int HighPassFilterImpl::InitializeHandle(void* handle) const {
return InitializeFilter(static_cast<Handle*>(handle),
- apm_->sample_rate_hz());
+ apm_->proc_sample_rate_hz());
}
int HighPassFilterImpl::ConfigureHandle(void* /*handle*/) const {
diff --git a/chromium/third_party/webrtc/modules/audio_processing/high_pass_filter_impl.h b/chromium/third_party/webrtc/modules/audio_processing/high_pass_filter_impl.h
index 7e11ea9ceee..6f91f3bc049 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/high_pass_filter_impl.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/high_pass_filter_impl.h
@@ -15,13 +15,14 @@
#include "webrtc/modules/audio_processing/processing_component.h"
namespace webrtc {
-class AudioProcessingImpl;
+
class AudioBuffer;
+class CriticalSectionWrapper;
class HighPassFilterImpl : public HighPassFilter,
public ProcessingComponent {
public:
- explicit HighPassFilterImpl(const AudioProcessingImpl* apm);
+ HighPassFilterImpl(const AudioProcessing* apm, CriticalSectionWrapper* crit);
virtual ~HighPassFilterImpl();
int ProcessCaptureAudio(AudioBuffer* audio);
@@ -37,11 +38,12 @@ class HighPassFilterImpl : public HighPassFilter,
virtual void* CreateHandle() const OVERRIDE;
virtual int InitializeHandle(void* handle) const OVERRIDE;
virtual int ConfigureHandle(void* handle) const OVERRIDE;
- virtual int DestroyHandle(void* handle) const OVERRIDE;
+ virtual void DestroyHandle(void* handle) const OVERRIDE;
virtual int num_handles_required() const OVERRIDE;
virtual int GetHandleError(void* handle) const OVERRIDE;
- const AudioProcessingImpl* apm_;
+ const AudioProcessing* apm_;
+ CriticalSectionWrapper* crit_;
};
} // namespace webrtc
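The constructor change above is part of moving lock ownership out of the individual components. As a minimal sketch, assuming the owning implementation creates the shared lock via CriticalSectionWrapper::CreateCriticalSection() from system_wrappers:

    // Sketch only; in practice AudioProcessingImpl owns |apm| and |crit|.
    CriticalSectionWrapper* crit =
        CriticalSectionWrapper::CreateCriticalSection();
    HighPassFilterImpl* hpf = new HighPassFilterImpl(apm, crit);
    hpf->Enable(true);  // Now locks on |crit| rather than apm->crit().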
diff --git a/chromium/third_party/webrtc/modules/audio_processing/include/audio_processing.h b/chromium/third_party/webrtc/modules/audio_processing/include/audio_processing.h
index b5c856de273..77c3f3add22 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/include/audio_processing.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/include/audio_processing.h
@@ -15,7 +15,6 @@
#include <stdio.h> // FILE
#include "webrtc/common.h"
-#include "webrtc/modules/interface/module.h"
#include "webrtc/typedefs.h"
struct AecCore;
@@ -50,11 +49,32 @@ class VoiceDetection;
// except when really necessary.
struct DelayCorrection {
DelayCorrection() : enabled(false) {}
- DelayCorrection(bool enabled) : enabled(enabled) {}
+ explicit DelayCorrection(bool enabled) : enabled(enabled) {}
+ bool enabled;
+};
+
+// Use to disable the reported system delays. When they are disabled, the echo
+// cancellation algorithm assumes the process and reverse streams are aligned.
+// This configuration only applies to EchoCancellation, not EchoControlMobile,
+// and is set with AudioProcessing::SetExtraOptions(). Note that disabling
+// reported system delays may degrade EchoCancellation performance.
+struct ReportedDelay {
+ ReportedDelay() : enabled(true) {}
+ explicit ReportedDelay(bool enabled) : enabled(enabled) {}
+ bool enabled;
+};
+// Must be provided through AudioProcessing::Create(Config&). It will have no
+// impact if used with AudioProcessing::SetExtraOptions().
+struct ExperimentalAgc {
+ ExperimentalAgc() : enabled(true) {}
+ explicit ExperimentalAgc(bool enabled) : enabled(enabled) {}
bool enabled;
};
+static const int kAudioProcMaxNativeSampleRateHz = 32000;
+
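As a sketch of how these options are consumed (assuming the webrtc::Config::Set<T>() helper from webrtc/common.h, which takes ownership of the pointer):

    webrtc::Config config;
    // ExperimentalAgc is only honored at create-time.
    config.Set<webrtc::ExperimentalAgc>(new webrtc::ExperimentalAgc(false));
    webrtc::AudioProcessing* apm = webrtc::AudioProcessing::Create(config);

    // ReportedDelay and DelayCorrection may also go through SetExtraOptions().
    webrtc::Config extra;
    extra.Set<webrtc::ReportedDelay>(new webrtc::ReportedDelay(false));
    apm->SetExtraOptions(extra);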
// The Audio Processing Module (APM) provides a collection of voice processing
// components designed for real-time communications software.
//
@@ -84,16 +104,12 @@ struct DelayCorrection {
// 2. Parameter getters are never called concurrently with the corresponding
// setter.
//
-// APM accepts only 16-bit linear PCM audio data in frames of 10 ms. Multiple
-// channels should be interleaved.
+// APM accepts only linear PCM audio data in chunks of 10 ms. The int16
+// interfaces use interleaved data, while the float interfaces use deinterleaved
+// data.
//
// Usage example, omitting error checking:
// AudioProcessing* apm = AudioProcessing::Create(0);
-// apm->set_sample_rate_hz(32000); // Super-wideband processing.
-//
-// // Mono capture and stereo render.
-// apm->set_num_channels(1, 1);
-// apm->set_num_reverse_channels(2);
//
// apm->high_pass_filter()->Enable(true);
//
@@ -132,13 +148,27 @@ struct DelayCorrection {
// // Close the application...
// delete apm;
//
-class AudioProcessing : public Module {
+class AudioProcessing {
public:
- // Creates a APM instance, with identifier |id|. Use one instance for every
- // primary audio stream requiring processing. On the client-side, this would
- // typically be one instance for the near-end stream, and additional instances
- // for each far-end stream which requires processing. On the server-side,
- // this would typically be one instance for every incoming stream.
+ enum ChannelLayout {
+ kMono,
+ // Left, right.
+ kStereo,
+ // Mono, keyboard mic.
+ kMonoAndKeyboard,
+ // Left, right, keyboard mic.
+ kStereoAndKeyboard
+ };
+
+ // Creates an APM instance. Use one instance for every primary audio stream
+ // requiring processing. On the client-side, this would typically be one
+ // instance for the near-end stream, and additional instances for each far-end
+ // stream which requires processing. On the server-side, this would typically
+ // be one instance for every incoming stream.
+ static AudioProcessing* Create();
+ // Allows passing in an optional configuration at create-time.
+ static AudioProcessing* Create(const Config& config);
+ // TODO(ajm): Deprecated; remove all calls to it.
static AudioProcessing* Create(int id);
virtual ~AudioProcessing() {}
@@ -147,11 +177,26 @@ class AudioProcessing : public Module {
// it is not necessary to call before processing the first stream after
// creation.
//
- // set_sample_rate_hz(), set_num_channels() and set_num_reverse_channels()
- // will trigger a full initialization if the settings are changed from their
- // existing values. Otherwise they are no-ops.
+ // It is also not necessary to call if the audio parameters (sample
+ // rate and number of channels) have changed. Passing updated parameters
+ // directly to |ProcessStream()| and |AnalyzeReverseStream()| is permissible.
+  // If the parameters are known at init-time, though, they may be provided.
virtual int Initialize() = 0;
+ // The int16 interfaces require:
+ // - only |NativeRate|s be used
+ // - that the input, output and reverse rates must match
+ // - that |output_layout| matches |input_layout|
+ //
+ // The float interfaces accept arbitrary rates and support differing input
+  // and output layouts, but the output layout may only remove channels, not
+  // add them.
+ virtual int Initialize(int input_sample_rate_hz,
+ int output_sample_rate_hz,
+ int reverse_sample_rate_hz,
+ ChannelLayout input_layout,
+ ChannelLayout output_layout,
+ ChannelLayout reverse_layout) = 0;
+
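A hedged example of the parameterized Initialize(); the rates and layouts are illustrative, and 48 kHz is only legal on the float interfaces since the int16 path is restricted to |NativeRate|s:

    apm->Initialize(48000, 48000, 48000,
                    AudioProcessing::kMono,     // capture input
                    AudioProcessing::kMono,     // capture output
                    AudioProcessing::kStereo);  // render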
// Pass down additional options which don't have explicit setters. This
// ensures the options are applied immediately.
virtual void SetExtraOptions(const Config& config) = 0;
@@ -159,23 +204,30 @@ class AudioProcessing : public Module {
virtual int EnableExperimentalNs(bool enable) = 0;
virtual bool experimental_ns_enabled() const = 0;
- // Sets the sample |rate| in Hz for both the primary and reverse audio
- // streams. 8000, 16000 or 32000 Hz are permitted.
+ // DEPRECATED.
+ // TODO(ajm): Remove after Chromium has upgraded to using Initialize().
virtual int set_sample_rate_hz(int rate) = 0;
+ // TODO(ajm): Remove after voice engine no longer requires it to resample
+ // the reverse stream to the forward rate.
+ virtual int input_sample_rate_hz() const = 0;
+ // TODO(ajm): Remove after Chromium no longer depends on it.
virtual int sample_rate_hz() const = 0;
- // Sets the number of channels for the primary audio stream. Input frames must
- // contain a number of channels given by |input_channels|, while output frames
- // will be returned with number of channels given by |output_channels|.
- virtual int set_num_channels(int input_channels, int output_channels) = 0;
+ // TODO(ajm): Only intended for internal use. Make private and friend the
+ // necessary classes?
+ virtual int proc_sample_rate_hz() const = 0;
+ virtual int proc_split_sample_rate_hz() const = 0;
virtual int num_input_channels() const = 0;
virtual int num_output_channels() const = 0;
-
- // Sets the number of channels for the reverse audio stream. Input frames must
- // contain a number of channels given by |channels|.
- virtual int set_num_reverse_channels(int channels) = 0;
virtual int num_reverse_channels() const = 0;
+ // Set to true when the output of AudioProcessing will be muted or in some
+ // other way not used. Ideally, the captured audio would still be processed,
+ // but some components may change behavior based on this information.
+ // Default false.
+ virtual void set_output_will_be_muted(bool muted) = 0;
+ virtual bool output_will_be_muted() const = 0;
+
// Processes a 10 ms |frame| of the primary audio stream. On the client-side,
// this is the near-end (or captured) audio.
//
@@ -184,10 +236,25 @@ class AudioProcessing : public Module {
// with the stream_ tag which is needed should be called after processing.
//
// The |sample_rate_hz_|, |num_channels_|, and |samples_per_channel_|
- // members of |frame| must be valid, and correspond to settings supplied
- // to APM.
+  // members of |frame| must be valid. If they change from the previous call
+  // to this method, an initialization will be triggered.
virtual int ProcessStream(AudioFrame* frame) = 0;
+ // Accepts deinterleaved float audio with the range [-1, 1]. Each element
+ // of |src| points to a channel buffer, arranged according to
+ // |input_layout|. At output, the channels will be arranged according to
+ // |output_layout| at |output_sample_rate_hz| in |dest|.
+ //
+  // The output layout may only remove channels, not add them. |src| and
+  // |dest| may use the same memory, if desired.
+ virtual int ProcessStream(const float* const* src,
+ int samples_per_channel,
+ int input_sample_rate_hz,
+ ChannelLayout input_layout,
+ int output_sample_rate_hz,
+ ChannelLayout output_layout,
+ float* const* dest) = 0;
+
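A sketch of the deinterleaved float path; the 480-sample buffers correspond to 10 ms at 48 kHz:

    float left[480], right[480];
    float* channels[] = {left, right};  // one buffer per channel, in [-1, 1]
    // Stereo in, mono out, processed in place at 48 kHz.
    apm->ProcessStream(channels, 480, 48000, AudioProcessing::kStereo,
                       48000, AudioProcessing::kMono, channels);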
// Analyzes a 10 ms |frame| of the reverse direction audio stream. The frame
// will not be modified. On the client-side, this is the far-end (or to be
// rendered) audio.
@@ -199,11 +266,19 @@ class AudioProcessing : public Module {
// chances are you don't need to use it.
//
// The |sample_rate_hz_|, |num_channels_|, and |samples_per_channel_|
- // members of |frame| must be valid.
+ // members of |frame| must be valid. |sample_rate_hz_| must correspond to
+  // |input_sample_rate_hz()|.
//
// TODO(ajm): add const to input; requires an implementation fix.
virtual int AnalyzeReverseStream(AudioFrame* frame) = 0;
+ // Accepts deinterleaved float audio with the range [-1, 1]. Each element
+ // of |data| points to a channel buffer, arranged according to |layout|.
+ virtual int AnalyzeReverseStream(const float* const* data,
+ int samples_per_channel,
+ int sample_rate_hz,
+ ChannelLayout layout) = 0;
+
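And the matching far-end call, as a sketch under the same assumptions:

    float far_left[480], far_right[480];
    float* far_channels[] = {far_left, far_right};
    apm->AnalyzeReverseStream(far_channels, 480, 48000,
                              AudioProcessing::kStereo);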
// This must be called if and only if echo processing is enabled.
//
// Sets the |delay| in ms between AnalyzeReverseStream() receiving a far-end
@@ -219,6 +294,12 @@ class AudioProcessing : public Module {
// ProcessStream().
virtual int set_stream_delay_ms(int delay) = 0;
virtual int stream_delay_ms() const = 0;
+ virtual bool was_stream_delay_set() const = 0;
+
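The delay passed to set_stream_delay_ms() is the render-to-capture round trip. A sketch, with hypothetical latencies standing in for values a real client would measure from its audio device:

    int render_latency_ms = 40;   // AnalyzeReverseStream() to playout
    int capture_latency_ms = 30;  // capture to ProcessStream()
    apm->set_stream_delay_ms(render_latency_ms + capture_latency_ms);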
+ // Call to signal that a key press occurred (true) or did not occur (false)
+ // with this chunk of audio.
+ virtual void set_stream_key_pressed(bool key_pressed) = 0;
+ virtual bool stream_key_pressed() const = 0;
// Sets a delay |offset| in ms to add to the values passed in through
// set_stream_delay_ms(). May be positive or negative.
@@ -283,9 +364,13 @@ class AudioProcessing : public Module {
kBadStreamParameterWarning = -13
};
- // Inherited from Module.
- virtual int32_t TimeUntilNextProcess() OVERRIDE;
- virtual int32_t Process() OVERRIDE;
+ enum NativeRate {
+ kSampleRate8kHz = 8000,
+ kSampleRate16kHz = 16000,
+ kSampleRate32kHz = 32000
+ };
+
+ static const int kChunkSizeMs = 10;
};
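Since every rate is processed in |kChunkSizeMs| chunks, the per-call frame count follows directly from the rate; for example:

    // 10 ms per chunk, e.g. 320 samples per channel at 32 kHz.
    int samples_per_channel =
        sample_rate_hz * AudioProcessing::kChunkSizeMs / 1000;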
// The acoustic echo cancellation (AEC) component provides better performance
@@ -306,16 +391,10 @@ class EchoCancellation {
// render and capture devices are used, particularly with webcams.
//
// This enables a compensation mechanism, and requires that
- // |set_device_sample_rate_hz()| and |set_stream_drift_samples()| be called.
+ // set_stream_drift_samples() be called.
virtual int enable_drift_compensation(bool enable) = 0;
virtual bool is_drift_compensation_enabled() const = 0;
- // Provides the sampling rate of the audio devices. It is assumed the render
- // and capture devices use the same nominal sample rate. Required if and only
- // if drift compensation is enabled.
- virtual int set_device_sample_rate_hz(int rate) = 0;
- virtual int device_sample_rate_hz() const = 0;
-
// Sets the difference between the number of samples rendered and captured by
// the audio devices since the last call to |ProcessStream()|. Must be called
// if drift compensation is enabled, prior to |ProcessStream()|.
@@ -555,8 +634,7 @@ class LevelEstimator {
// frames since the last call to RMS(). The returned value is positive but
// should be interpreted as negative. It is constrained to [0, 127].
//
- // The computation follows:
- // http://tools.ietf.org/html/draft-ietf-avtext-client-to-mixer-audio-level-05
+ // The computation follows: https://tools.ietf.org/html/rfc6465
// with the intent that it can provide the RTP audio level indication.
//
// Frames passed to ProcessStream() with an |_energy| of zero are considered
diff --git a/chromium/third_party/webrtc/modules/audio_processing/include/mock_audio_processing.h b/chromium/third_party/webrtc/modules/audio_processing/include/mock_audio_processing.h
index 46520ab494e..c1ac23adf76 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/include/mock_audio_processing.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/include/mock_audio_processing.h
@@ -26,10 +26,6 @@ class MockEchoCancellation : public EchoCancellation {
int(bool enable));
MOCK_CONST_METHOD0(is_drift_compensation_enabled,
bool());
- MOCK_METHOD1(set_device_sample_rate_hz,
- int(int rate));
- MOCK_CONST_METHOD0(device_sample_rate_hz,
- int());
MOCK_METHOD1(set_stream_drift_samples,
void(int drift));
MOCK_CONST_METHOD0(stream_drift_samples,
@@ -181,6 +177,13 @@ class MockAudioProcessing : public AudioProcessing {
MOCK_METHOD0(Initialize,
int());
+ MOCK_METHOD6(Initialize,
+ int(int sample_rate_hz,
+ int output_sample_rate_hz,
+ int reverse_sample_rate_hz,
+ ChannelLayout input_layout,
+ ChannelLayout output_layout,
+ ChannelLayout reverse_layout));
MOCK_METHOD1(SetExtraOptions,
void(const Config& config));
MOCK_METHOD1(EnableExperimentalNs,
@@ -189,26 +192,49 @@ class MockAudioProcessing : public AudioProcessing {
bool());
MOCK_METHOD1(set_sample_rate_hz,
int(int rate));
+ MOCK_CONST_METHOD0(input_sample_rate_hz,
+ int());
MOCK_CONST_METHOD0(sample_rate_hz,
int());
- MOCK_METHOD2(set_num_channels,
- int(int input_channels, int output_channels));
+ MOCK_CONST_METHOD0(proc_sample_rate_hz,
+ int());
+ MOCK_CONST_METHOD0(proc_split_sample_rate_hz,
+ int());
MOCK_CONST_METHOD0(num_input_channels,
int());
MOCK_CONST_METHOD0(num_output_channels,
int());
- MOCK_METHOD1(set_num_reverse_channels,
- int(int channels));
MOCK_CONST_METHOD0(num_reverse_channels,
int());
+ MOCK_METHOD1(set_output_will_be_muted,
+ void(bool muted));
+ MOCK_CONST_METHOD0(output_will_be_muted,
+ bool());
MOCK_METHOD1(ProcessStream,
int(AudioFrame* frame));
+ MOCK_METHOD7(ProcessStream,
+ int(const float* const* src,
+ int samples_per_channel,
+ int input_sample_rate_hz,
+ ChannelLayout input_layout,
+ int output_sample_rate_hz,
+ ChannelLayout output_layout,
+ float* const* dest));
MOCK_METHOD1(AnalyzeReverseStream,
int(AudioFrame* frame));
+ MOCK_METHOD4(AnalyzeReverseStream,
+ int(const float* const* data, int frames, int sample_rate_hz,
+ ChannelLayout input_layout));
MOCK_METHOD1(set_stream_delay_ms,
int(int delay));
MOCK_CONST_METHOD0(stream_delay_ms,
int());
+ MOCK_CONST_METHOD0(was_stream_delay_set,
+ bool());
+ MOCK_METHOD1(set_stream_key_pressed,
+ void(bool key_pressed));
+ MOCK_CONST_METHOD0(stream_key_pressed,
+ bool());
MOCK_METHOD1(set_delay_offset_ms,
void(int offset));
MOCK_CONST_METHOD0(delay_offset_ms,
@@ -230,20 +256,16 @@ class MockAudioProcessing : public AudioProcessing {
}
virtual MockHighPassFilter* high_pass_filter() const {
return high_pass_filter_.get();
- };
+ }
virtual MockLevelEstimator* level_estimator() const {
return level_estimator_.get();
- };
+ }
virtual MockNoiseSuppression* noise_suppression() const {
return noise_suppression_.get();
- };
+ }
virtual MockVoiceDetection* voice_detection() const {
return voice_detection_.get();
- };
- MOCK_METHOD0(TimeUntilNextProcess,
- int32_t());
- MOCK_METHOD0(Process,
- int32_t());
+ }
private:
scoped_ptr<MockEchoCancellation> echo_cancellation_;
diff --git a/chromium/third_party/webrtc/modules/audio_processing/level_estimator_impl.cc b/chromium/third_party/webrtc/modules/audio_processing/level_estimator_impl.cc
index 29dbdfc78e0..cfe295a6a0b 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/level_estimator_impl.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/level_estimator_impl.cc
@@ -10,110 +10,35 @@
#include "webrtc/modules/audio_processing/level_estimator_impl.h"
-#include <assert.h>
-#include <math.h>
-#include <string.h>
-
#include "webrtc/modules/audio_processing/audio_buffer.h"
-#include "webrtc/modules/audio_processing/audio_processing_impl.h"
+#include "webrtc/modules/audio_processing/include/audio_processing.h"
+#include "webrtc/modules/audio_processing/rms_level.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
namespace webrtc {
-namespace {
-
-const double kMaxSquaredLevel = 32768.0 * 32768.0;
-
-class Level {
- public:
- static const int kMinLevel = 127;
-
- Level()
- : sum_square_(0.0),
- sample_count_(0) {}
- ~Level() {}
-
- void Init() {
- sum_square_ = 0.0;
- sample_count_ = 0;
- }
-
- void Process(int16_t* data, int length) {
- assert(data != NULL);
- assert(length > 0);
- sum_square_ += SumSquare(data, length);
- sample_count_ += length;
- }
-
- void ProcessMuted(int length) {
- assert(length > 0);
- sample_count_ += length;
- }
- int RMS() {
- if (sample_count_ == 0 || sum_square_ == 0.0) {
- Init();
- return kMinLevel;
- }
-
- // Normalize by the max level.
- double rms = sum_square_ / (sample_count_ * kMaxSquaredLevel);
- // 20log_10(x^0.5) = 10log_10(x)
- rms = 10 * log10(rms);
- if (rms > 0)
- rms = 0;
- else if (rms < -kMinLevel)
- rms = -kMinLevel;
-
- rms = -rms;
- Init();
- return static_cast<int>(rms + 0.5);
- }
-
- private:
- static double SumSquare(int16_t* data, int length) {
- double sum_square = 0.0;
- for (int i = 0; i < length; ++i) {
- double data_d = static_cast<double>(data[i]);
- sum_square += data_d * data_d;
- }
- return sum_square;
- }
-
- double sum_square_;
- int sample_count_;
-};
-} // namespace
-
-LevelEstimatorImpl::LevelEstimatorImpl(const AudioProcessingImpl* apm)
- : ProcessingComponent(apm),
- apm_(apm) {}
+LevelEstimatorImpl::LevelEstimatorImpl(const AudioProcessing* apm,
+ CriticalSectionWrapper* crit)
+ : ProcessingComponent(),
+ crit_(crit) {}
LevelEstimatorImpl::~LevelEstimatorImpl() {}
int LevelEstimatorImpl::ProcessStream(AudioBuffer* audio) {
if (!is_component_enabled()) {
- return apm_->kNoError;
+ return AudioProcessing::kNoError;
}
- Level* level = static_cast<Level*>(handle(0));
- if (audio->is_muted()) {
- level->ProcessMuted(audio->samples_per_channel());
- return apm_->kNoError;
+ RMSLevel* rms_level = static_cast<RMSLevel*>(handle(0));
+ for (int i = 0; i < audio->num_channels(); ++i) {
+ rms_level->Process(audio->data(i), audio->samples_per_channel());
}
- int16_t* mixed_data = audio->data(0);
- if (audio->num_channels() > 1) {
- audio->CopyAndMix(1);
- mixed_data = audio->mixed_data(0);
- }
-
- level->Process(mixed_data, audio->samples_per_channel());
-
- return apm_->kNoError;
+ return AudioProcessing::kNoError;
}
int LevelEstimatorImpl::Enable(bool enable) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
return EnableComponent(enable);
}
@@ -123,43 +48,38 @@ bool LevelEstimatorImpl::is_enabled() const {
int LevelEstimatorImpl::RMS() {
if (!is_component_enabled()) {
- return apm_->kNotEnabledError;
+ return AudioProcessing::kNotEnabledError;
}
- Level* level = static_cast<Level*>(handle(0));
- return level->RMS();
+ RMSLevel* rms_level = static_cast<RMSLevel*>(handle(0));
+ return rms_level->RMS();
}
+// The ProcessingComponent implementation is pretty weird in this class since
+// we have only a single instance of the trivial underlying component.
void* LevelEstimatorImpl::CreateHandle() const {
- return new Level;
+ return new RMSLevel;
}
-int LevelEstimatorImpl::DestroyHandle(void* handle) const {
- assert(handle != NULL);
- Level* level = static_cast<Level*>(handle);
- delete level;
- return apm_->kNoError;
+void LevelEstimatorImpl::DestroyHandle(void* handle) const {
+ delete static_cast<RMSLevel*>(handle);
}
int LevelEstimatorImpl::InitializeHandle(void* handle) const {
- assert(handle != NULL);
- Level* level = static_cast<Level*>(handle);
- level->Init();
-
- return apm_->kNoError;
+ static_cast<RMSLevel*>(handle)->Reset();
+ return AudioProcessing::kNoError;
}
int LevelEstimatorImpl::ConfigureHandle(void* /*handle*/) const {
- return apm_->kNoError;
+ return AudioProcessing::kNoError;
}
int LevelEstimatorImpl::num_handles_required() const {
return 1;
}
-int LevelEstimatorImpl::GetHandleError(void* handle) const {
- // The component has no detailed errors.
- assert(handle != NULL);
- return apm_->kUnspecifiedError;
+int LevelEstimatorImpl::GetHandleError(void* /*handle*/) const {
+ return AudioProcessing::kUnspecifiedError;
}
+
} // namespace webrtc
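The removed Level class and its RMSLevel replacement compute the same RFC 6465-style value: the RMS in dB relative to full scale, negated and clamped to [0, 127]. A self-contained sketch of that computation, reconstructed from the deleted code:

    #include <cmath>
    #include <stdint.h>

    // Negated dBFS RMS over |length| samples, clamped to [0, 127].
    int RmsDbfs(const int16_t* data, int length) {
      double sum_square = 0.0;
      for (int i = 0; i < length; ++i)
        sum_square += static_cast<double>(data[i]) * data[i];
      if (length == 0 || sum_square == 0.0)
        return 127;  // Silence maps to the minimum level.
      const double kMaxSquaredLevel = 32768.0 * 32768.0;
      double rms = 10.0 * log10(sum_square / (length * kMaxSquaredLevel));
      if (rms > 0.0) rms = 0.0;
      if (rms < -127.0) rms = -127.0;
      return static_cast<int>(-rms + 0.5);
    }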
diff --git a/chromium/third_party/webrtc/modules/audio_processing/level_estimator_impl.h b/chromium/third_party/webrtc/modules/audio_processing/level_estimator_impl.h
index 20dc18dc425..b38337d4d41 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/level_estimator_impl.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/level_estimator_impl.h
@@ -13,15 +13,18 @@
#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/modules/audio_processing/processing_component.h"
+#include "webrtc/modules/audio_processing/rms_level.h"
namespace webrtc {
-class AudioProcessingImpl;
+
class AudioBuffer;
+class CriticalSectionWrapper;
class LevelEstimatorImpl : public LevelEstimator,
public ProcessingComponent {
public:
- explicit LevelEstimatorImpl(const AudioProcessingImpl* apm);
+ LevelEstimatorImpl(const AudioProcessing* apm,
+ CriticalSectionWrapper* crit);
virtual ~LevelEstimatorImpl();
int ProcessStream(AudioBuffer* audio);
@@ -38,12 +41,13 @@ class LevelEstimatorImpl : public LevelEstimator,
virtual void* CreateHandle() const OVERRIDE;
virtual int InitializeHandle(void* handle) const OVERRIDE;
virtual int ConfigureHandle(void* handle) const OVERRIDE;
- virtual int DestroyHandle(void* handle) const OVERRIDE;
+ virtual void DestroyHandle(void* handle) const OVERRIDE;
virtual int num_handles_required() const OVERRIDE;
virtual int GetHandleError(void* handle) const OVERRIDE;
- const AudioProcessingImpl* apm_;
+ CriticalSectionWrapper* crit_;
};
+
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_PROCESSING_LEVEL_ESTIMATOR_IMPL_H_
diff --git a/chromium/third_party/webrtc/modules/audio_processing/lib_core_neon_offsets.gypi b/chromium/third_party/webrtc/modules/audio_processing/lib_core_neon_offsets.gypi
new file mode 100644
index 00000000000..f32ddd47f78
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_processing/lib_core_neon_offsets.gypi
@@ -0,0 +1,51 @@
+# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+# This file has common information for gen_core_neon_offsets.gyp
+# and gen_core_neon_offsets_chromium.gyp
+{
+ 'variables': {
+ 'variables' : {
+ 'lib_intermediate_name': '',
+ 'conditions' : [
+ ['android_webview_build==1', {
+ 'lib_intermediate_name' : '$(abspath $(call intermediates-dir-for,STATIC_LIBRARIES,lib_core_neon_offsets,,,$(gyp_var_prefix)))/lib_core_neon_offsets.a',
+ }],
+ ],
+ },
+ 'shared_generated_dir': '<(SHARED_INTERMEDIATE_DIR)/audio_processing/asm_offsets',
+ 'output_dir': '<(shared_generated_dir)',
+ 'output_format': 'cheader',
+ 'unpack_lib_search_path_list': [
+ '-a', '<(PRODUCT_DIR)/lib_core_neon_offsets.a',
+ '-a', '<(LIB_DIR)/webrtc/modules/audio_processing/lib_core_neon_offsets.a',
+ '-a', '<(LIB_DIR)/third_party/webrtc/modules/audio_processing/lib_core_neon_offsets.a',
+ '-a', '<(lib_intermediate_name)',
+ ],
+ 'unpack_lib_output_dir':'<(shared_generated_dir)',
+ },
+ 'includes': [
+ '../../build/common.gypi',
+ ],
+ 'conditions': [
+ ['((target_arch=="arm" and arm_version==7) or target_arch=="armv7") and (OS=="android" or OS=="ios")', {
+ 'targets' : [
+ {
+ 'target_name': 'lib_core_neon_offsets',
+ 'type': 'static_library',
+ 'android_unmangled_name': 1,
+ 'hard_dependency': 1,
+ 'sources': [
+ 'ns/nsx_core_neon_offsets.c',
+ 'aecm/aecm_core_neon_offsets.c',
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/chromium/third_party/webrtc/modules/audio_processing/noise_suppression_impl.cc b/chromium/third_party/webrtc/modules/audio_processing/noise_suppression_impl.cc
index 41c11b1cd72..eea0a04a2a6 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/noise_suppression_impl.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/noise_suppression_impl.cc
@@ -12,15 +12,14 @@
#include <assert.h>
-#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
+#include "webrtc/modules/audio_processing/audio_buffer.h"
#if defined(WEBRTC_NS_FLOAT)
#include "webrtc/modules/audio_processing/ns/include/noise_suppression.h"
#elif defined(WEBRTC_NS_FIXED)
#include "webrtc/modules/audio_processing/ns/include/noise_suppression_x.h"
#endif
+#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
-#include "webrtc/modules/audio_processing/audio_buffer.h"
-#include "webrtc/modules/audio_processing/audio_processing_impl.h"
namespace webrtc {
@@ -47,9 +46,11 @@ int MapSetting(NoiseSuppression::Level level) {
}
} // namespace
-NoiseSuppressionImpl::NoiseSuppressionImpl(const AudioProcessingImpl* apm)
- : ProcessingComponent(apm),
+NoiseSuppressionImpl::NoiseSuppressionImpl(const AudioProcessing* apm,
+ CriticalSectionWrapper* crit)
+ : ProcessingComponent(),
apm_(apm),
+ crit_(crit),
level_(kModerate) {}
NoiseSuppressionImpl::~NoiseSuppressionImpl() {}
@@ -67,10 +68,10 @@ int NoiseSuppressionImpl::ProcessCaptureAudio(AudioBuffer* audio) {
Handle* my_handle = static_cast<Handle*>(handle(i));
#if defined(WEBRTC_NS_FLOAT)
err = WebRtcNs_Process(static_cast<Handle*>(handle(i)),
- audio->low_pass_split_data(i),
- audio->high_pass_split_data(i),
- audio->low_pass_split_data(i),
- audio->high_pass_split_data(i));
+ audio->low_pass_split_data_f(i),
+ audio->high_pass_split_data_f(i),
+ audio->low_pass_split_data_f(i),
+ audio->high_pass_split_data_f(i));
#elif defined(WEBRTC_NS_FIXED)
err = WebRtcNsx_Process(static_cast<Handle*>(handle(i)),
audio->low_pass_split_data(i),
@@ -88,7 +89,7 @@ int NoiseSuppressionImpl::ProcessCaptureAudio(AudioBuffer* audio) {
}
int NoiseSuppressionImpl::Enable(bool enable) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
return EnableComponent(enable);
}
@@ -97,7 +98,7 @@ bool NoiseSuppressionImpl::is_enabled() const {
}
int NoiseSuppressionImpl::set_level(Level level) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
if (MapSetting(level) == -1) {
return apm_->kBadParameterError;
}
@@ -140,19 +141,21 @@ void* NoiseSuppressionImpl::CreateHandle() const {
return handle;
}
-int NoiseSuppressionImpl::DestroyHandle(void* handle) const {
+void NoiseSuppressionImpl::DestroyHandle(void* handle) const {
#if defined(WEBRTC_NS_FLOAT)
- return WebRtcNs_Free(static_cast<Handle*>(handle));
+ WebRtcNs_Free(static_cast<Handle*>(handle));
#elif defined(WEBRTC_NS_FIXED)
- return WebRtcNsx_Free(static_cast<Handle*>(handle));
+ WebRtcNsx_Free(static_cast<Handle*>(handle));
#endif
}
int NoiseSuppressionImpl::InitializeHandle(void* handle) const {
#if defined(WEBRTC_NS_FLOAT)
- return WebRtcNs_Init(static_cast<Handle*>(handle), apm_->sample_rate_hz());
+ return WebRtcNs_Init(static_cast<Handle*>(handle),
+ apm_->proc_sample_rate_hz());
#elif defined(WEBRTC_NS_FIXED)
- return WebRtcNsx_Init(static_cast<Handle*>(handle), apm_->sample_rate_hz());
+ return WebRtcNsx_Init(static_cast<Handle*>(handle),
+ apm_->proc_sample_rate_hz());
#endif
}
diff --git a/chromium/third_party/webrtc/modules/audio_processing/noise_suppression_impl.h b/chromium/third_party/webrtc/modules/audio_processing/noise_suppression_impl.h
index f6dd8cbd78a..cadbbd9cd4c 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/noise_suppression_impl.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/noise_suppression_impl.h
@@ -15,13 +15,15 @@
#include "webrtc/modules/audio_processing/processing_component.h"
namespace webrtc {
-class AudioProcessingImpl;
+
class AudioBuffer;
+class CriticalSectionWrapper;
class NoiseSuppressionImpl : public NoiseSuppression,
public ProcessingComponent {
public:
- explicit NoiseSuppressionImpl(const AudioProcessingImpl* apm);
+ NoiseSuppressionImpl(const AudioProcessing* apm,
+ CriticalSectionWrapper* crit);
virtual ~NoiseSuppressionImpl();
int ProcessCaptureAudio(AudioBuffer* audio);
@@ -40,13 +42,15 @@ class NoiseSuppressionImpl : public NoiseSuppression,
virtual void* CreateHandle() const OVERRIDE;
virtual int InitializeHandle(void* handle) const OVERRIDE;
virtual int ConfigureHandle(void* handle) const OVERRIDE;
- virtual int DestroyHandle(void* handle) const OVERRIDE;
+ virtual void DestroyHandle(void* handle) const OVERRIDE;
virtual int num_handles_required() const OVERRIDE;
virtual int GetHandleError(void* handle) const OVERRIDE;
- const AudioProcessingImpl* apm_;
+ const AudioProcessing* apm_;
+ CriticalSectionWrapper* crit_;
Level level_;
};
+
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_PROCESSING_NOISE_SUPPRESSION_IMPL_H_
diff --git a/chromium/third_party/webrtc/modules/audio_processing/ns/include/noise_suppression.h b/chromium/third_party/webrtc/modules/audio_processing/ns/include/noise_suppression.h
index 32b18038089..3cf889e2d07 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/ns/include/noise_suppression.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/ns/include/noise_suppression.h
@@ -99,10 +99,10 @@ int WebRtcNs_set_policy(NsHandle* NS_inst, int mode);
* -1 - Error
*/
int WebRtcNs_Process(NsHandle* NS_inst,
- short* spframe,
- short* spframe_H,
- short* outframe,
- short* outframe_H);
+ float* spframe,
+ float* spframe_H,
+ float* outframe,
+ float* outframe_H);
/* Returns the internally used prior speech probability of the current frame.
* There is a frequency bin based one as well, with which this should not be
diff --git a/chromium/third_party/webrtc/modules/audio_processing/ns/noise_suppression.c b/chromium/third_party/webrtc/modules/audio_processing/ns/noise_suppression.c
index 848467f080c..075ab88c1c6 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/ns/noise_suppression.c
+++ b/chromium/third_party/webrtc/modules/audio_processing/ns/noise_suppression.c
@@ -43,8 +43,8 @@ int WebRtcNs_set_policy(NsHandle* NS_inst, int mode) {
}
-int WebRtcNs_Process(NsHandle* NS_inst, short* spframe, short* spframe_H,
- short* outframe, short* outframe_H) {
+int WebRtcNs_Process(NsHandle* NS_inst, float* spframe, float* spframe_H,
+ float* outframe, float* outframe_H) {
return WebRtcNs_ProcessCore(
(NSinst_t*) NS_inst, spframe, spframe_H, outframe, outframe_H);
}
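A hedged calling sketch for the migrated float API, assuming a 32 kHz stream already split into 160-sample low and high bands (band splitting happens elsewhere in APM) and an initialized |ns_inst|:

    float lb_in[160], hb_in[160], lb_out[160], hb_out[160];
    // ... fill the band buffers with one 10 ms chunk ...
    WebRtcNs_Process(ns_inst, lb_in, hb_in, lb_out, hb_out);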
diff --git a/chromium/third_party/webrtc/modules/audio_processing/ns/ns_core.c b/chromium/third_party/webrtc/modules/audio_processing/ns/ns_core.c
index 124a66d8df5..ec267ae0f69 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/ns/ns_core.c
+++ b/chromium/third_party/webrtc/modules/audio_processing/ns/ns_core.c
@@ -715,10 +715,10 @@ void WebRtcNs_SpeechNoiseProb(NSinst_t* inst, float* probSpeechFinal, float* snr
}
int WebRtcNs_ProcessCore(NSinst_t* inst,
- short* speechFrame,
- short* speechFrameHB,
- short* outFrame,
- short* outFrameHB) {
+ float* speechFrame,
+ float* speechFrameHB,
+ float* outFrame,
+ float* outFrameHB) {
// main routine for noise reduction
int flagHB = 0;
@@ -731,8 +731,8 @@ int WebRtcNs_ProcessCore(NSinst_t* inst,
float snrPrior, currentEstimateStsa;
float tmpFloat1, tmpFloat2, tmpFloat3, probSpeech, probNonSpeech;
float gammaNoiseTmp, gammaNoiseOld;
- float noiseUpdateTmp, fTmp, dTmp;
- float fin[BLOCKL_MAX], fout[BLOCKL_MAX];
+ float noiseUpdateTmp, fTmp;
+ float fout[BLOCKL_MAX];
float winData[ANAL_BLOCKL_MAX];
float magn[HALF_ANAL_BLOCKL], noise[HALF_ANAL_BLOCKL];
float theFilter[HALF_ANAL_BLOCKL], theFilterTmp[HALF_ANAL_BLOCKL];
@@ -775,26 +775,17 @@ int WebRtcNs_ProcessCore(NSinst_t* inst,
updateParsFlag = inst->modelUpdatePars[0];
//
- //for LB do all processing
- // convert to float
- for (i = 0; i < inst->blockLen10ms; i++) {
- fin[i] = (float)speechFrame[i];
- }
// update analysis buffer for L band
memcpy(inst->dataBuf, inst->dataBuf + inst->blockLen10ms,
sizeof(float) * (inst->anaLen - inst->blockLen10ms));
- memcpy(inst->dataBuf + inst->anaLen - inst->blockLen10ms, fin,
+ memcpy(inst->dataBuf + inst->anaLen - inst->blockLen10ms, speechFrame,
sizeof(float) * inst->blockLen10ms);
if (flagHB == 1) {
- // convert to float
- for (i = 0; i < inst->blockLen10ms; i++) {
- fin[i] = (float)speechFrameHB[i];
- }
// update analysis buffer for H band
memcpy(inst->dataBufHB, inst->dataBufHB + inst->blockLen10ms,
sizeof(float) * (inst->anaLen - inst->blockLen10ms));
- memcpy(inst->dataBufHB + inst->anaLen - inst->blockLen10ms, fin,
+ memcpy(inst->dataBufHB + inst->anaLen - inst->blockLen10ms, speechFrameHB,
sizeof(float) * inst->blockLen10ms);
}
@@ -833,30 +824,16 @@ int WebRtcNs_ProcessCore(NSinst_t* inst,
inst->outBuf[i] = fout[i + inst->blockLen10ms];
}
}
- // convert to short
- for (i = 0; i < inst->blockLen10ms; i++) {
- dTmp = fout[i];
- if (dTmp < WEBRTC_SPL_WORD16_MIN) {
- dTmp = WEBRTC_SPL_WORD16_MIN;
- } else if (dTmp > WEBRTC_SPL_WORD16_MAX) {
- dTmp = WEBRTC_SPL_WORD16_MAX;
- }
- outFrame[i] = (short)dTmp;
- }
+ for (i = 0; i < inst->blockLen10ms; ++i)
+ outFrame[i] = WEBRTC_SPL_SAT(
+ WEBRTC_SPL_WORD16_MAX, fout[i], WEBRTC_SPL_WORD16_MIN);
// for time-domain gain of HB
- if (flagHB == 1) {
- for (i = 0; i < inst->blockLen10ms; i++) {
- dTmp = inst->dataBufHB[i];
- if (dTmp < WEBRTC_SPL_WORD16_MIN) {
- dTmp = WEBRTC_SPL_WORD16_MIN;
- } else if (dTmp > WEBRTC_SPL_WORD16_MAX) {
- dTmp = WEBRTC_SPL_WORD16_MAX;
- }
- outFrameHB[i] = (short)dTmp;
- }
- } // end of H band gain computation
- //
+ if (flagHB == 1)
+ for (i = 0; i < inst->blockLen10ms; ++i)
+ outFrameHB[i] = WEBRTC_SPL_SAT(
+ WEBRTC_SPL_WORD16_MAX, inst->dataBufHB[i], WEBRTC_SPL_WORD16_MIN);
+
return 0;
}
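WEBRTC_SPL_SAT(max, value, min) clamps before the int16 store; the replaced branches are equivalent to:

    // Equivalent clamp, written out. WEBRTC_SPL_WORD16_MAX is 32767 and
    // WEBRTC_SPL_WORD16_MIN is -32768.
    static inline short SatToInt16(float v) {
      if (v > 32767.0f) return 32767;
      if (v < -32768.0f) return -32768;
      return (short)v;
    }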
@@ -1239,16 +1216,9 @@ int WebRtcNs_ProcessCore(NSinst_t* inst,
inst->outLen -= inst->blockLen10ms;
}
- // convert to short
- for (i = 0; i < inst->blockLen10ms; i++) {
- dTmp = fout[i];
- if (dTmp < WEBRTC_SPL_WORD16_MIN) {
- dTmp = WEBRTC_SPL_WORD16_MIN;
- } else if (dTmp > WEBRTC_SPL_WORD16_MAX) {
- dTmp = WEBRTC_SPL_WORD16_MAX;
- }
- outFrame[i] = (short)dTmp;
- }
+ for (i = 0; i < inst->blockLen10ms; ++i)
+ outFrame[i] = WEBRTC_SPL_SAT(
+ WEBRTC_SPL_WORD16_MAX, fout[i], WEBRTC_SPL_WORD16_MIN);
// for time-domain gain of HB
if (flagHB == 1) {
@@ -1289,13 +1259,9 @@ int WebRtcNs_ProcessCore(NSinst_t* inst,
}
//apply gain
for (i = 0; i < inst->blockLen10ms; i++) {
- dTmp = gainTimeDomainHB * inst->dataBufHB[i];
- if (dTmp < WEBRTC_SPL_WORD16_MIN) {
- dTmp = WEBRTC_SPL_WORD16_MIN;
- } else if (dTmp > WEBRTC_SPL_WORD16_MAX) {
- dTmp = WEBRTC_SPL_WORD16_MAX;
- }
- outFrameHB[i] = (short)dTmp;
+ float o = gainTimeDomainHB * inst->dataBufHB[i];
+ outFrameHB[i] = WEBRTC_SPL_SAT(
+ WEBRTC_SPL_WORD16_MAX, o, WEBRTC_SPL_WORD16_MIN);
}
} // end of H band gain computation
//
diff --git a/chromium/third_party/webrtc/modules/audio_processing/ns/ns_core.h b/chromium/third_party/webrtc/modules/audio_processing/ns/ns_core.h
index 50daa137cf8..785239ebdac 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/ns/ns_core.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/ns/ns_core.h
@@ -167,10 +167,10 @@ int WebRtcNs_set_policy_core(NSinst_t* inst, int mode);
int WebRtcNs_ProcessCore(NSinst_t* inst,
- short* inFrameLow,
- short* inFrameHigh,
- short* outFrameLow,
- short* outFrameHigh);
+ float* inFrameLow,
+ float* inFrameHigh,
+ float* outFrameLow,
+ float* outFrameHigh);
#ifdef __cplusplus
diff --git a/chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core.c b/chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core.c
index c7229579f4c..2c8270f568c 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core.c
+++ b/chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core.c
@@ -70,11 +70,6 @@ static const int16_t WebRtcNsx_kLogTableFrac[256] = {
// Skip first frequency bins during estimation. (0 <= value < 64)
static const int kStartBand = 5;
-static const int16_t kIndicatorTable[17] = {
- 0, 2017, 3809, 5227, 6258, 6963, 7424, 7718,
- 7901, 8014, 8084, 8126, 8152, 8168, 8177, 8183, 8187
-};
-
 // hybrid Hanning & flat window
static const int16_t kBlocks80w128x[128] = {
0, 536, 1072, 1606, 2139, 2669, 3196, 3720, 4240, 4756, 5266,
@@ -481,7 +476,7 @@ static void PrepareSpectrumC(NsxInst_t* inst, int16_t* freq_buf) {
}
// Denormalize the real-valued signal |in|, the output from inverse FFT.
-static __inline void Denormalize(NsxInst_t* inst, int16_t* in, int factor) {
+static void DenormalizeC(NsxInst_t* inst, int16_t* in, int factor) {
int i = 0;
int32_t tmp32 = 0;
for (i = 0; i < inst->anaLen; i += 1) {
@@ -546,9 +541,9 @@ static void AnalysisUpdateC(NsxInst_t* inst,
}
// Normalize the real-valued signal |in|, the input to forward FFT.
-static __inline void NormalizeRealBuffer(NsxInst_t* inst,
- const int16_t* in,
- int16_t* out) {
+static void NormalizeRealBufferC(NsxInst_t* inst,
+ const int16_t* in,
+ int16_t* out) {
int i = 0;
for (i = 0; i < inst->anaLen; ++i) {
out[i] = WEBRTC_SPL_LSHIFT_W16(in[i], inst->normData); // Q(normData)
@@ -560,6 +555,8 @@ NoiseEstimation WebRtcNsx_NoiseEstimation;
PrepareSpectrum WebRtcNsx_PrepareSpectrum;
SynthesisUpdate WebRtcNsx_SynthesisUpdate;
AnalysisUpdate WebRtcNsx_AnalysisUpdate;
+Denormalize WebRtcNsx_Denormalize;
+NormalizeRealBuffer WebRtcNsx_NormalizeRealBuffer;
#if (defined WEBRTC_DETECT_ARM_NEON || defined WEBRTC_ARCH_ARM_NEON)
// Initialize function pointers for ARM Neon platform.
@@ -571,6 +568,19 @@ static void WebRtcNsx_InitNeon(void) {
}
#endif
+#if defined(MIPS32_LE)
+// Initialize function pointers for MIPS platform.
+static void WebRtcNsx_InitMips(void) {
+ WebRtcNsx_PrepareSpectrum = WebRtcNsx_PrepareSpectrum_mips;
+ WebRtcNsx_SynthesisUpdate = WebRtcNsx_SynthesisUpdate_mips;
+ WebRtcNsx_AnalysisUpdate = WebRtcNsx_AnalysisUpdate_mips;
+ WebRtcNsx_NormalizeRealBuffer = WebRtcNsx_NormalizeRealBuffer_mips;
+#if defined(MIPS_DSP_R1_LE)
+ WebRtcNsx_Denormalize = WebRtcNsx_Denormalize_mips;
+#endif
+}
+#endif
+
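The pattern here is runtime dispatch through file-scope function pointers: a portable C implementation is installed in InitCore, then overwritten when a NEON or MIPS build provides a faster one. A condensed, hypothetical illustration of the idea (the names are not the real symbols):

    typedef void (*Transform)(int16_t* buf, int len);

    static void TransformC(int16_t* buf, int len) {
      for (int i = 0; i < len; ++i) buf[i] >>= 1;  // placeholder work
    }

    // Default to the portable path; platform init code may later assign
    // an optimized implementation, exactly as WebRtcNsx_InitMips() does.
    static Transform g_transform = TransformC;

Callers always go through the pointer, so the selection cost is paid once at initialization rather than on every frame.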
void WebRtcNsx_CalcParametricNoiseEstimate(NsxInst_t* inst,
int16_t pink_noise_exp_avg,
int32_t pink_noise_num_avg,
@@ -758,6 +768,8 @@ int32_t WebRtcNsx_InitCore(NsxInst_t* inst, uint32_t fs) {
WebRtcNsx_PrepareSpectrum = PrepareSpectrumC;
WebRtcNsx_SynthesisUpdate = SynthesisUpdateC;
WebRtcNsx_AnalysisUpdate = AnalysisUpdateC;
+ WebRtcNsx_Denormalize = DenormalizeC;
+ WebRtcNsx_NormalizeRealBuffer = NormalizeRealBufferC;
#ifdef WEBRTC_DETECT_ARM_NEON
uint64_t features = WebRtc_GetCPUFeaturesARM();
@@ -768,6 +780,10 @@ int32_t WebRtcNsx_InitCore(NsxInst_t* inst, uint32_t fs) {
WebRtcNsx_InitNeon();
#endif
+#if defined(MIPS32_LE)
+ WebRtcNsx_InitMips();
+#endif
+
inst->initFlag = 1;
return 0;
@@ -1169,239 +1185,6 @@ void WebRtcNsx_ComputeSpectralDifference(NsxInst_t* inst, uint16_t* magnIn) {
}
}
-// Compute speech/noise probability
-// speech/noise probability is returned in: probSpeechFinal
-//snrLocPrior is the prior SNR for each frequency (in Q11)
-//snrLocPost is the post SNR for each frequency (in Q11)
-void WebRtcNsx_SpeechNoiseProb(NsxInst_t* inst, uint16_t* nonSpeechProbFinal,
- uint32_t* priorLocSnr, uint32_t* postLocSnr) {
- uint32_t zeros, num, den, tmpU32no1, tmpU32no2, tmpU32no3;
-
- int32_t invLrtFX, indPriorFX, tmp32, tmp32no1, tmp32no2, besselTmpFX32;
- int32_t frac32, logTmp;
- int32_t logLrtTimeAvgKsumFX;
-
- int16_t indPriorFX16;
- int16_t tmp16, tmp16no1, tmp16no2, tmpIndFX, tableIndex, frac, intPart;
-
- int i, normTmp, normTmp2, nShifts;
-
- // compute feature based on average LR factor
- // this is the average over all frequencies of the smooth log LRT
- logLrtTimeAvgKsumFX = 0;
- for (i = 0; i < inst->magnLen; i++) {
- besselTmpFX32 = (int32_t)postLocSnr[i]; // Q11
- normTmp = WebRtcSpl_NormU32(postLocSnr[i]);
- num = WEBRTC_SPL_LSHIFT_U32(postLocSnr[i], normTmp); // Q(11+normTmp)
- if (normTmp > 10) {
- den = WEBRTC_SPL_LSHIFT_U32(priorLocSnr[i], normTmp - 11); // Q(normTmp)
- } else {
- den = WEBRTC_SPL_RSHIFT_U32(priorLocSnr[i], 11 - normTmp); // Q(normTmp)
- }
- if (den > 0) {
- besselTmpFX32 -= WEBRTC_SPL_UDIV(num, den); // Q11
- } else {
- besselTmpFX32 -= num; // Q11
- }
-
- // inst->logLrtTimeAvg[i] += LRT_TAVG * (besselTmp - log(snrLocPrior) - inst->logLrtTimeAvg[i]);
- // Here, LRT_TAVG = 0.5
- zeros = WebRtcSpl_NormU32(priorLocSnr[i]);
- frac32 = (int32_t)(((priorLocSnr[i] << zeros) & 0x7FFFFFFF) >> 19);
- tmp32 = WEBRTC_SPL_MUL(frac32, frac32);
- tmp32 = WEBRTC_SPL_RSHIFT_W32(WEBRTC_SPL_MUL(tmp32, -43), 19);
- tmp32 += WEBRTC_SPL_MUL_16_16_RSFT((int16_t)frac32, 5412, 12);
- frac32 = tmp32 + 37;
- // tmp32 = log2(priorLocSnr[i])
- tmp32 = (int32_t)(((31 - zeros) << 12) + frac32) - (11 << 12); // Q12
- logTmp = WEBRTC_SPL_RSHIFT_W32(WEBRTC_SPL_MUL_32_16(tmp32, 178), 8); // log2(priorLocSnr[i])*log(2)
- tmp32no1 = WEBRTC_SPL_RSHIFT_W32(logTmp + inst->logLrtTimeAvgW32[i], 1); // Q12
- inst->logLrtTimeAvgW32[i] += (besselTmpFX32 - tmp32no1); // Q12
-
- logLrtTimeAvgKsumFX += inst->logLrtTimeAvgW32[i]; // Q12
- }
- inst->featureLogLrt = WEBRTC_SPL_RSHIFT_W32(logLrtTimeAvgKsumFX * 5, inst->stages + 10); // 5 = BIN_SIZE_LRT / 2
- // done with computation of LR factor
-
- //
- //compute the indicator functions
- //
-
- // average LRT feature
- // FLOAT code
- // indicator0 = 0.5 * (tanh(widthPrior * (logLrtTimeAvgKsum - threshPrior0)) + 1.0);
- tmpIndFX = 16384; // Q14(1.0)
- tmp32no1 = logLrtTimeAvgKsumFX - inst->thresholdLogLrt; // Q12
- nShifts = 7 - inst->stages; // WIDTH_PR_MAP_SHIFT - inst->stages + 5;
- //use larger width in tanh map for pause regions
- if (tmp32no1 < 0) {
- tmpIndFX = 0;
- tmp32no1 = -tmp32no1;
- //widthPrior = widthPrior * 2.0;
- nShifts++;
- }
- tmp32no1 = WEBRTC_SPL_SHIFT_W32(tmp32no1, nShifts); // Q14
- // compute indicator function: sigmoid map
- tableIndex = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32no1, 14);
- if ((tableIndex < 16) && (tableIndex >= 0)) {
- tmp16no2 = kIndicatorTable[tableIndex];
- tmp16no1 = kIndicatorTable[tableIndex + 1] - kIndicatorTable[tableIndex];
- frac = (int16_t)(tmp32no1 & 0x00003fff); // Q14
- tmp16no2 += (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(tmp16no1, frac, 14);
- if (tmpIndFX == 0) {
- tmpIndFX = 8192 - tmp16no2; // Q14
- } else {
- tmpIndFX = 8192 + tmp16no2; // Q14
- }
- }
- indPriorFX = WEBRTC_SPL_MUL_16_16(inst->weightLogLrt, tmpIndFX); // 6*Q14
-
- //spectral flatness feature
- if (inst->weightSpecFlat) {
- tmpU32no1 = WEBRTC_SPL_UMUL(inst->featureSpecFlat, 400); // Q10
- tmpIndFX = 16384; // Q14(1.0)
- //use larger width in tanh map for pause regions
- tmpU32no2 = inst->thresholdSpecFlat - tmpU32no1; //Q10
- nShifts = 4;
- if (inst->thresholdSpecFlat < tmpU32no1) {
- tmpIndFX = 0;
- tmpU32no2 = tmpU32no1 - inst->thresholdSpecFlat;
- //widthPrior = widthPrior * 2.0;
- nShifts++;
- }
- tmp32no1 = (int32_t)WebRtcSpl_DivU32U16(WEBRTC_SPL_LSHIFT_U32(tmpU32no2,
- nShifts), 25); //Q14
- tmpU32no1 = WebRtcSpl_DivU32U16(WEBRTC_SPL_LSHIFT_U32(tmpU32no2, nShifts), 25); //Q14
- // compute indicator function: sigmoid map
- // FLOAT code
- // indicator1 = 0.5 * (tanh(sgnMap * widthPrior * (threshPrior1 - tmpFloat1)) + 1.0);
- tableIndex = (int16_t)WEBRTC_SPL_RSHIFT_U32(tmpU32no1, 14);
- if (tableIndex < 16) {
- tmp16no2 = kIndicatorTable[tableIndex];
- tmp16no1 = kIndicatorTable[tableIndex + 1] - kIndicatorTable[tableIndex];
- frac = (int16_t)(tmpU32no1 & 0x00003fff); // Q14
- tmp16no2 += (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(tmp16no1, frac, 14);
- if (tmpIndFX) {
- tmpIndFX = 8192 + tmp16no2; // Q14
- } else {
- tmpIndFX = 8192 - tmp16no2; // Q14
- }
- }
- indPriorFX += WEBRTC_SPL_MUL_16_16(inst->weightSpecFlat, tmpIndFX); // 6*Q14
- }
-
- //for template spectral-difference
- if (inst->weightSpecDiff) {
- tmpU32no1 = 0;
- if (inst->featureSpecDiff) {
- normTmp = WEBRTC_SPL_MIN(20 - inst->stages,
- WebRtcSpl_NormU32(inst->featureSpecDiff));
- tmpU32no1 = WEBRTC_SPL_LSHIFT_U32(inst->featureSpecDiff, normTmp); // Q(normTmp-2*stages)
- tmpU32no2 = WEBRTC_SPL_RSHIFT_U32(inst->timeAvgMagnEnergy, 20 - inst->stages
- - normTmp);
- if (tmpU32no2 > 0) {
- // Q(20 - inst->stages)
- tmpU32no1 = WEBRTC_SPL_UDIV(tmpU32no1, tmpU32no2);
- } else {
- tmpU32no1 = (uint32_t)(0x7fffffff);
- }
- }
- tmpU32no3 = WEBRTC_SPL_UDIV(WEBRTC_SPL_LSHIFT_U32(inst->thresholdSpecDiff, 17), 25);
- tmpU32no2 = tmpU32no1 - tmpU32no3;
- nShifts = 1;
- tmpIndFX = 16384; // Q14(1.0)
- //use larger width in tanh map for pause regions
- if (tmpU32no2 & 0x80000000) {
- tmpIndFX = 0;
- tmpU32no2 = tmpU32no3 - tmpU32no1;
- //widthPrior = widthPrior * 2.0;
- nShifts--;
- }
- tmpU32no1 = WEBRTC_SPL_RSHIFT_U32(tmpU32no2, nShifts);
- // compute indicator function: sigmoid map
- /* FLOAT code
- indicator2 = 0.5 * (tanh(widthPrior * (tmpFloat1 - threshPrior2)) + 1.0);
- */
- tableIndex = (int16_t)WEBRTC_SPL_RSHIFT_U32(tmpU32no1, 14);
- if (tableIndex < 16) {
- tmp16no2 = kIndicatorTable[tableIndex];
- tmp16no1 = kIndicatorTable[tableIndex + 1] - kIndicatorTable[tableIndex];
- frac = (int16_t)(tmpU32no1 & 0x00003fff); // Q14
- tmp16no2 += (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
- tmp16no1, frac, 14);
- if (tmpIndFX) {
- tmpIndFX = 8192 + tmp16no2;
- } else {
- tmpIndFX = 8192 - tmp16no2;
- }
- }
- indPriorFX += WEBRTC_SPL_MUL_16_16(inst->weightSpecDiff, tmpIndFX); // 6*Q14
- }
-
- //combine the indicator function with the feature weights
- // FLOAT code
- // indPrior = 1 - (weightIndPrior0 * indicator0 + weightIndPrior1 * indicator1 + weightIndPrior2 * indicator2);
- indPriorFX16 = WebRtcSpl_DivW32W16ResW16(98307 - indPriorFX, 6); // Q14
- // done with computing indicator function
-
- //compute the prior probability
- // FLOAT code
- // inst->priorNonSpeechProb += PRIOR_UPDATE * (indPriorNonSpeech - inst->priorNonSpeechProb);
- tmp16 = indPriorFX16 - inst->priorNonSpeechProb; // Q14
- inst->priorNonSpeechProb += (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(
- PRIOR_UPDATE_Q14, tmp16, 14); // Q14
-
- //final speech probability: combine prior model with LR factor:
-
- memset(nonSpeechProbFinal, 0, sizeof(uint16_t) * inst->magnLen);
-
- if (inst->priorNonSpeechProb > 0) {
- for (i = 0; i < inst->magnLen; i++) {
- // FLOAT code
- // invLrt = exp(inst->logLrtTimeAvg[i]);
- // invLrt = inst->priorSpeechProb * invLrt;
- // nonSpeechProbFinal[i] = (1.0 - inst->priorSpeechProb) / (1.0 - inst->priorSpeechProb + invLrt);
- // invLrt = (1.0 - inst->priorNonSpeechProb) * invLrt;
- // nonSpeechProbFinal[i] = inst->priorNonSpeechProb / (inst->priorNonSpeechProb + invLrt);
- if (inst->logLrtTimeAvgW32[i] < 65300) {
- tmp32no1 = WEBRTC_SPL_RSHIFT_W32(WEBRTC_SPL_MUL(inst->logLrtTimeAvgW32[i], 23637),
- 14); // Q12
- intPart = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32no1, 12);
- if (intPart < -8) {
- intPart = -8;
- }
- frac = (int16_t)(tmp32no1 & 0x00000fff); // Q12
-
- // Quadratic approximation of 2^frac
- tmp32no2 = WEBRTC_SPL_RSHIFT_W32(frac * frac * 44, 19); // Q12
- tmp32no2 += WEBRTC_SPL_MUL_16_16_RSFT(frac, 84, 7); // Q12
- invLrtFX = WEBRTC_SPL_LSHIFT_W32(1, 8 + intPart)
- + WEBRTC_SPL_SHIFT_W32(tmp32no2, intPart - 4); // Q8
-
- normTmp = WebRtcSpl_NormW32(invLrtFX);
- normTmp2 = WebRtcSpl_NormW16((16384 - inst->priorNonSpeechProb));
- if (normTmp + normTmp2 >= 7) {
- if (normTmp + normTmp2 < 15) {
- invLrtFX = WEBRTC_SPL_RSHIFT_W32(invLrtFX, 15 - normTmp2 - normTmp);
- // Q(normTmp+normTmp2-7)
- tmp32no1 = WEBRTC_SPL_MUL_32_16(invLrtFX, (16384 - inst->priorNonSpeechProb));
- // Q(normTmp+normTmp2+7)
- invLrtFX = WEBRTC_SPL_SHIFT_W32(tmp32no1, 7 - normTmp - normTmp2); // Q14
- } else {
- tmp32no1 = WEBRTC_SPL_MUL_32_16(invLrtFX, (16384 - inst->priorNonSpeechProb)); // Q22
- invLrtFX = WEBRTC_SPL_RSHIFT_W32(tmp32no1, 8); // Q14
- }
-
- tmp32no1 = WEBRTC_SPL_LSHIFT_W32((int32_t)inst->priorNonSpeechProb, 8); // Q22
-
- nonSpeechProbFinal[i] = (uint16_t)WEBRTC_SPL_DIV(tmp32no1,
- (int32_t)inst->priorNonSpeechProb + invLrtFX); // Q8
- }
- }
- }
- }
-}
-
// Transform input (speechFrame) to frequency domain magnitude (magnU16)
void WebRtcNsx_DataAnalysis(NsxInst_t* inst, short* speechFrame, uint16_t* magnU16) {
@@ -1461,7 +1244,7 @@ void WebRtcNsx_DataAnalysis(NsxInst_t* inst, short* speechFrame, uint16_t* magnU
right_shifts_in_magnU16 = WEBRTC_SPL_MAX(right_shifts_in_magnU16, 0);
// create realImag as winData interleaved with zeros (= imag. part), normalize it
- NormalizeRealBuffer(inst, winData, realImag);
+ WebRtcNsx_NormalizeRealBuffer(inst, winData, realImag);
// FFT output will be in winData[].
WebRtcSpl_RealForwardFFT(inst->real_fft, realImag, winData);
@@ -1624,9 +1407,9 @@ void WebRtcNsx_DataAnalysis(NsxInst_t* inst, short* speechFrame, uint16_t* magnU
tmpU32no1 = WEBRTC_SPL_RSHIFT_U32((uint32_t)sum_log_i_log_magn, 12); // Q5
// Shift the largest value of sum_log_i and tmp32no3 before multiplication
- tmp_u16 = WEBRTC_SPL_LSHIFT_U16((uint16_t)sum_log_i, 1); // Q6
+ tmp_u16 = ((uint16_t)sum_log_i << 1); // Q6
if ((uint32_t)sum_log_i > tmpU32no1) {
- tmp_u16 = WEBRTC_SPL_RSHIFT_U16(tmp_u16, zeros);
+ tmp_u16 >>= zeros;
} else {
tmpU32no1 = WEBRTC_SPL_RSHIFT_U32(tmpU32no1, zeros);
}
@@ -1693,7 +1476,7 @@ void WebRtcNsx_DataSynthesis(NsxInst_t* inst, short* outFrame) {
// Inverse FFT output will be in rfft_out[].
outCIFFT = WebRtcSpl_RealInverseFFT(inst->real_fft, realImag, rfft_out);
- Denormalize(inst, rfft_out, outCIFFT);
+ WebRtcNsx_Denormalize(inst, rfft_out, outCIFFT);
//scale factor: only do it after END_STARTUP_LONG time
gainFactor = 8192; // 8192 = Q13(1.0)
@@ -2288,8 +2071,8 @@ int WebRtcNsx_ProcessCore(NsxInst_t* inst, short* speechFrame, short* speechFram
tmpU16no1 += nonSpeechProbFinal[i]; // Q8
tmpU32no1 += (uint32_t)(inst->noiseSupFilter[i]); // Q14
}
- avgProbSpeechHB = (int16_t)(4096
- - WEBRTC_SPL_RSHIFT_U16(tmpU16no1, inst->stages - 7)); // Q12
+ assert(inst->stages >= 7);
+ avgProbSpeechHB = (4096 - (tmpU16no1 >> (inst->stages - 7))); // Q12
avgFilterGainHB = (int16_t)WEBRTC_SPL_RSHIFT_U32(
tmpU32no1, inst->stages - 3); // Q14
diff --git a/chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core.h b/chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core.h
index 1ad369ffbeb..5b3c5e78f4e 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core.h
@@ -201,6 +201,23 @@ typedef void (*AnalysisUpdate)(NsxInst_t* inst,
int16_t* new_speech);
extern AnalysisUpdate WebRtcNsx_AnalysisUpdate;
+// Denormalize the real-valued signal |in|, the output from inverse FFT.
+typedef void (*Denormalize)(NsxInst_t* inst, int16_t* in, int factor);
+extern Denormalize WebRtcNsx_Denormalize;
+
+// Normalize the real-valued signal |in|, the input to forward FFT.
+typedef void (*NormalizeRealBuffer)(NsxInst_t* inst,
+                                    const int16_t* in,
+                                    int16_t* out);
+extern NormalizeRealBuffer WebRtcNsx_NormalizeRealBuffer;
+
+// Compute speech/noise probability.
+// Intended to be private.
+void WebRtcNsx_SpeechNoiseProb(NsxInst_t* inst,
+ uint16_t* nonSpeechProbFinal,
+ uint32_t* priorLocSnr,
+ uint32_t* postLocSnr);
+
#if (defined WEBRTC_DETECT_ARM_NEON) || defined (WEBRTC_ARCH_ARM_NEON)
// For the above function pointers, functions for generic platforms are declared
// and defined as static in file nsx_core.c, while those for ARM Neon platforms
@@ -218,6 +235,26 @@ void WebRtcNsx_AnalysisUpdateNeon(NsxInst_t* inst,
void WebRtcNsx_PrepareSpectrumNeon(NsxInst_t* inst, int16_t* freq_buff);
#endif
+#if defined(MIPS32_LE)
+// For the above function pointers, functions for generic platforms are declared
+// and defined as static in file nsx_core.c, while those for MIPS platforms
+// are declared below and defined in file nsx_core_mips.c.
+void WebRtcNsx_SynthesisUpdate_mips(NsxInst_t* inst,
+ int16_t* out_frame,
+ int16_t gain_factor);
+void WebRtcNsx_AnalysisUpdate_mips(NsxInst_t* inst,
+ int16_t* out,
+ int16_t* new_speech);
+void WebRtcNsx_PrepareSpectrum_mips(NsxInst_t* inst, int16_t* freq_buff);
+void WebRtcNsx_NormalizeRealBuffer_mips(NsxInst_t* inst,
+ const int16_t* in,
+ int16_t* out);
+#if defined(MIPS_DSP_R1_LE)
+void WebRtcNsx_Denormalize_mips(NsxInst_t* inst, int16_t* in, int factor);
+#endif
+
+#endif
+
#ifdef __cplusplus
}
#endif
diff --git a/chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core_c.c b/chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core_c.c
new file mode 100644
index 00000000000..452b96e77b0
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core_c.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_processing/ns/include/noise_suppression_x.h"
+#include "webrtc/modules/audio_processing/ns/nsx_core.h"
+
+static const int16_t kIndicatorTable[17] = {
+ 0, 2017, 3809, 5227, 6258, 6963, 7424, 7718,
+ 7901, 8014, 8084, 8126, 8152, 8168, 8177, 8183, 8187
+};
+
+// Compute speech/noise probability.
+// The speech/noise probability is returned in probSpeechFinal.
+// snrLocPrior is the prior SNR for each frequency (in Q11).
+// snrLocPost is the post SNR for each frequency (in Q11).
+void WebRtcNsx_SpeechNoiseProb(NsxInst_t* inst,
+ uint16_t* nonSpeechProbFinal,
+ uint32_t* priorLocSnr,
+ uint32_t* postLocSnr) {
+
+ uint32_t zeros, num, den, tmpU32no1, tmpU32no2, tmpU32no3;
+ int32_t invLrtFX, indPriorFX, tmp32, tmp32no1, tmp32no2, besselTmpFX32;
+ int32_t frac32, logTmp;
+ int32_t logLrtTimeAvgKsumFX;
+ int16_t indPriorFX16;
+ int16_t tmp16, tmp16no1, tmp16no2, tmpIndFX, tableIndex, frac, intPart;
+ int i, normTmp, normTmp2, nShifts;
+
+ // compute feature based on average LR factor
+ // this is the average over all frequencies of the smooth log LRT
+ logLrtTimeAvgKsumFX = 0;
+ for (i = 0; i < inst->magnLen; i++) {
+ besselTmpFX32 = (int32_t)postLocSnr[i]; // Q11
+ normTmp = WebRtcSpl_NormU32(postLocSnr[i]);
+ num = WEBRTC_SPL_LSHIFT_U32(postLocSnr[i], normTmp); // Q(11+normTmp)
+ if (normTmp > 10) {
+ den = WEBRTC_SPL_LSHIFT_U32(priorLocSnr[i], normTmp - 11); // Q(normTmp)
+ } else {
+ den = WEBRTC_SPL_RSHIFT_U32(priorLocSnr[i], 11 - normTmp); // Q(normTmp)
+ }
+ if (den > 0) {
+ besselTmpFX32 -= WEBRTC_SPL_UDIV(num, den); // Q11
+ } else {
+ besselTmpFX32 -= num; // Q11
+ }
+
+ // inst->logLrtTimeAvg[i] += LRT_TAVG * (besselTmp - log(snrLocPrior)
+ // - inst->logLrtTimeAvg[i]);
+ // Here, LRT_TAVG = 0.5
+ zeros = WebRtcSpl_NormU32(priorLocSnr[i]);
+ frac32 = (int32_t)(((priorLocSnr[i] << zeros) & 0x7FFFFFFF) >> 19);
+ tmp32 = WEBRTC_SPL_MUL(frac32, frac32);
+ tmp32 = WEBRTC_SPL_RSHIFT_W32(WEBRTC_SPL_MUL(tmp32, -43), 19);
+ tmp32 += WEBRTC_SPL_MUL_16_16_RSFT((int16_t)frac32, 5412, 12);
+ frac32 = tmp32 + 37;
+ // tmp32 = log2(priorLocSnr[i])
+ tmp32 = (int32_t)(((31 - zeros) << 12) + frac32) - (11 << 12); // Q12
+ logTmp = WEBRTC_SPL_RSHIFT_W32(WEBRTC_SPL_MUL_32_16(tmp32, 178), 8);
+ // log2(priorLocSnr[i])*log(2)
+ tmp32no1 = WEBRTC_SPL_RSHIFT_W32(logTmp + inst->logLrtTimeAvgW32[i], 1);
+ // Q12
+ inst->logLrtTimeAvgW32[i] += (besselTmpFX32 - tmp32no1); // Q12
+
+ logLrtTimeAvgKsumFX += inst->logLrtTimeAvgW32[i]; // Q12
+ }
+ inst->featureLogLrt = WEBRTC_SPL_RSHIFT_W32(logLrtTimeAvgKsumFX * 5,
+ inst->stages + 10);
+ // 5 = BIN_SIZE_LRT / 2
+ // done with computation of LR factor
+
+ //
+ //compute the indicator functions
+ //
+
+ // average LRT feature
+ // FLOAT code
+ // indicator0 = 0.5 * (tanh(widthPrior *
+ // (logLrtTimeAvgKsum - threshPrior0)) + 1.0);
+ tmpIndFX = 16384; // Q14(1.0)
+ tmp32no1 = logLrtTimeAvgKsumFX - inst->thresholdLogLrt; // Q12
+ nShifts = 7 - inst->stages; // WIDTH_PR_MAP_SHIFT - inst->stages + 5;
+ //use larger width in tanh map for pause regions
+ if (tmp32no1 < 0) {
+ tmpIndFX = 0;
+ tmp32no1 = -tmp32no1;
+ //widthPrior = widthPrior * 2.0;
+ nShifts++;
+ }
+ tmp32no1 = WEBRTC_SPL_SHIFT_W32(tmp32no1, nShifts); // Q14
+ // compute indicator function: sigmoid map
+ tableIndex = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32no1, 14);
+ if ((tableIndex < 16) && (tableIndex >= 0)) {
+ tmp16no2 = kIndicatorTable[tableIndex];
+ tmp16no1 = kIndicatorTable[tableIndex + 1] - kIndicatorTable[tableIndex];
+ frac = (int16_t)(tmp32no1 & 0x00003fff); // Q14
+ tmp16no2 += (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(tmp16no1, frac, 14);
+ if (tmpIndFX == 0) {
+ tmpIndFX = 8192 - tmp16no2; // Q14
+ } else {
+ tmpIndFX = 8192 + tmp16no2; // Q14
+ }
+ }
+ indPriorFX = WEBRTC_SPL_MUL_16_16(inst->weightLogLrt, tmpIndFX); // 6*Q14
+
+  // spectral flatness feature
+ if (inst->weightSpecFlat) {
+ tmpU32no1 = WEBRTC_SPL_UMUL(inst->featureSpecFlat, 400); // Q10
+ tmpIndFX = 16384; // Q14(1.0)
+ //use larger width in tanh map for pause regions
+ tmpU32no2 = inst->thresholdSpecFlat - tmpU32no1; //Q10
+ nShifts = 4;
+ if (inst->thresholdSpecFlat < tmpU32no1) {
+ tmpIndFX = 0;
+ tmpU32no2 = tmpU32no1 - inst->thresholdSpecFlat;
+ //widthPrior = widthPrior * 2.0;
+ nShifts++;
+ }
+    tmpU32no1 = WebRtcSpl_DivU32U16(WEBRTC_SPL_LSHIFT_U32(tmpU32no2, nShifts),
+                                    25);  // Q14
+ // compute indicator function: sigmoid map
+ // FLOAT code
+ // indicator1 = 0.5 * (tanh(sgnMap * widthPrior *
+ // (threshPrior1 - tmpFloat1)) + 1.0);
+ tableIndex = (int16_t)WEBRTC_SPL_RSHIFT_U32(tmpU32no1, 14);
+ if (tableIndex < 16) {
+ tmp16no2 = kIndicatorTable[tableIndex];
+ tmp16no1 = kIndicatorTable[tableIndex + 1] - kIndicatorTable[tableIndex];
+ frac = (int16_t)(tmpU32no1 & 0x00003fff); // Q14
+ tmp16no2 += (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(tmp16no1, frac, 14);
+ if (tmpIndFX) {
+ tmpIndFX = 8192 + tmp16no2; // Q14
+ } else {
+ tmpIndFX = 8192 - tmp16no2; // Q14
+ }
+ }
+ indPriorFX += WEBRTC_SPL_MUL_16_16(inst->weightSpecFlat, tmpIndFX); // 6*Q14
+ }
+
+  // template spectral-difference feature
+ if (inst->weightSpecDiff) {
+ tmpU32no1 = 0;
+ if (inst->featureSpecDiff) {
+ normTmp = WEBRTC_SPL_MIN(20 - inst->stages,
+ WebRtcSpl_NormU32(inst->featureSpecDiff));
+ tmpU32no1 = WEBRTC_SPL_LSHIFT_U32(inst->featureSpecDiff, normTmp);
+ // Q(normTmp-2*stages)
+ tmpU32no2 = WEBRTC_SPL_RSHIFT_U32(inst->timeAvgMagnEnergy,
+ 20 - inst->stages - normTmp);
+ if (tmpU32no2 > 0) {
+ // Q(20 - inst->stages)
+ tmpU32no1 = WEBRTC_SPL_UDIV(tmpU32no1, tmpU32no2);
+ } else {
+ tmpU32no1 = (uint32_t)(0x7fffffff);
+ }
+ }
+ tmpU32no3 = WEBRTC_SPL_UDIV(WEBRTC_SPL_LSHIFT_U32(inst->thresholdSpecDiff,
+ 17),
+ 25);
+ tmpU32no2 = tmpU32no1 - tmpU32no3;
+ nShifts = 1;
+ tmpIndFX = 16384; // Q14(1.0)
+ //use larger width in tanh map for pause regions
+ if (tmpU32no2 & 0x80000000) {
+ tmpIndFX = 0;
+ tmpU32no2 = tmpU32no3 - tmpU32no1;
+ //widthPrior = widthPrior * 2.0;
+ nShifts--;
+ }
+ tmpU32no1 = WEBRTC_SPL_RSHIFT_U32(tmpU32no2, nShifts);
+ // compute indicator function: sigmoid map
+ /* FLOAT code
+ indicator2 = 0.5 * (tanh(widthPrior * (tmpFloat1 - threshPrior2)) + 1.0);
+ */
+ tableIndex = (int16_t)WEBRTC_SPL_RSHIFT_U32(tmpU32no1, 14);
+ if (tableIndex < 16) {
+ tmp16no2 = kIndicatorTable[tableIndex];
+ tmp16no1 = kIndicatorTable[tableIndex + 1] - kIndicatorTable[tableIndex];
+ frac = (int16_t)(tmpU32no1 & 0x00003fff); // Q14
+ tmp16no2 += (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
+ tmp16no1, frac, 14);
+ if (tmpIndFX) {
+ tmpIndFX = 8192 + tmp16no2;
+ } else {
+ tmpIndFX = 8192 - tmp16no2;
+ }
+ }
+ indPriorFX += WEBRTC_SPL_MUL_16_16(inst->weightSpecDiff, tmpIndFX); // 6*Q14
+ }
+
+  // combine the indicator functions with the feature weights
+ // FLOAT code
+ // indPrior = 1 - (weightIndPrior0 * indicator0 + weightIndPrior1 *
+ // indicator1 + weightIndPrior2 * indicator2);
+ indPriorFX16 = WebRtcSpl_DivW32W16ResW16(98307 - indPriorFX, 6); // Q14
+ // done with computing indicator function
+
+  // compute the prior probability
+ // FLOAT code
+ // inst->priorNonSpeechProb += PRIOR_UPDATE *
+ // (indPriorNonSpeech - inst->priorNonSpeechProb);
+ tmp16 = indPriorFX16 - inst->priorNonSpeechProb; // Q14
+ inst->priorNonSpeechProb += (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(
+ PRIOR_UPDATE_Q14, tmp16, 14); // Q14
+
+  // final speech probability: combine the prior model with the LR factor
+
+ memset(nonSpeechProbFinal, 0, sizeof(uint16_t) * inst->magnLen);
+
+ if (inst->priorNonSpeechProb > 0) {
+ for (i = 0; i < inst->magnLen; i++) {
+ // FLOAT code
+ // invLrt = exp(inst->logLrtTimeAvg[i]);
+ // invLrt = inst->priorSpeechProb * invLrt;
+ // nonSpeechProbFinal[i] = (1.0 - inst->priorSpeechProb) /
+ // (1.0 - inst->priorSpeechProb + invLrt);
+ // invLrt = (1.0 - inst->priorNonSpeechProb) * invLrt;
+ // nonSpeechProbFinal[i] = inst->priorNonSpeechProb /
+ // (inst->priorNonSpeechProb + invLrt);
+ if (inst->logLrtTimeAvgW32[i] < 65300) {
+ tmp32no1 = WEBRTC_SPL_RSHIFT_W32(WEBRTC_SPL_MUL(
+ inst->logLrtTimeAvgW32[i], 23637),
+ 14); // Q12
+ intPart = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32no1, 12);
+ if (intPart < -8) {
+ intPart = -8;
+ }
+ frac = (int16_t)(tmp32no1 & 0x00000fff); // Q12
+
+ // Quadratic approximation of 2^frac
+ tmp32no2 = WEBRTC_SPL_RSHIFT_W32(frac * frac * 44, 19); // Q12
+ tmp32no2 += WEBRTC_SPL_MUL_16_16_RSFT(frac, 84, 7); // Q12
+ invLrtFX = WEBRTC_SPL_LSHIFT_W32(1, 8 + intPart)
+ + WEBRTC_SPL_SHIFT_W32(tmp32no2, intPart - 4); // Q8
+
+ normTmp = WebRtcSpl_NormW32(invLrtFX);
+ normTmp2 = WebRtcSpl_NormW16((16384 - inst->priorNonSpeechProb));
+ if (normTmp + normTmp2 >= 7) {
+ if (normTmp + normTmp2 < 15) {
+ invLrtFX = WEBRTC_SPL_RSHIFT_W32(invLrtFX, 15 - normTmp2 - normTmp);
+ // Q(normTmp+normTmp2-7)
+ tmp32no1 = WEBRTC_SPL_MUL_32_16(invLrtFX,
+ (16384 - inst->priorNonSpeechProb));
+ // Q(normTmp+normTmp2+7)
+ invLrtFX = WEBRTC_SPL_SHIFT_W32(tmp32no1, 7 - normTmp - normTmp2);
+ // Q14
+ } else {
+ tmp32no1 = WEBRTC_SPL_MUL_32_16(invLrtFX,
+ (16384 - inst->priorNonSpeechProb));
+ // Q22
+ invLrtFX = WEBRTC_SPL_RSHIFT_W32(tmp32no1, 8); // Q14
+ }
+
+ tmp32no1 = WEBRTC_SPL_LSHIFT_W32((int32_t)inst->priorNonSpeechProb,
+ 8); // Q22
+
+ nonSpeechProbFinal[i] = (uint16_t)WEBRTC_SPL_DIV(tmp32no1,
+ (int32_t)inst->priorNonSpeechProb + invLrtFX); // Q8
+ }
+ }
+ }
+ }
+}
+
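The three Q14 sigmoid lookups above (LRT average, spectral flatness, spectral difference) all follow the same pattern, with kIndicatorTable holding samples of a tanh-shaped curve scaled so that 8192 represents 0.5 in Q14. A plain-C sketch of the shared pattern, not part of the patch (it assumes kIndicatorTable from this file and a Q14 argument with the width factor already folded in via nShifts):

    static int16_t IndicatorQ14(int32_t x_q14) {
      // Split |x| into integer and fractional Q14 parts.
      int32_t ax = x_q14 < 0 ? -x_q14 : x_q14;
      int16_t index = (int16_t)(ax >> 14);
      if (index >= 16) {
        return x_q14 < 0 ? 0 : 16384;  // Curve has saturated: Q14(0) or Q14(1).
      }
      int16_t frac = (int16_t)(ax & 0x3fff);  // Q14 fraction for interpolation.
      int16_t t = kIndicatorTable[index] +
          (int16_t)(((int32_t)(kIndicatorTable[index + 1] -
                               kIndicatorTable[index]) * frac) >> 14);
      // 8192 is Q14(0.5); the sign of x selects 0.5 - t or 0.5 + t.
      return (int16_t)(x_q14 < 0 ? 8192 - t : 8192 + t);
    }

This mirrors the float reference in the comments, indicator = 0.5 * (tanh(widthPrior * (x - thresh)) + 1.0), with the subtraction and the widthPrior scaling handled by the caller.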
diff --git a/chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core_mips.c b/chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core_mips.c
new file mode 100644
index 00000000000..ccb0c376324
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_processing/ns/nsx_core_mips.c
@@ -0,0 +1,1008 @@
+/*
+ * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_processing/ns/include/noise_suppression_x.h"
+#include "webrtc/modules/audio_processing/ns/nsx_core.h"
+
+static const int16_t kIndicatorTable[17] = {
+ 0, 2017, 3809, 5227, 6258, 6963, 7424, 7718,
+ 7901, 8014, 8084, 8126, 8152, 8168, 8177, 8183, 8187
+};
+
+// Compute the speech/noise probability.
+// The non-speech probability is returned in nonSpeechProbFinal.
+// priorLocSnr is the prior SNR for each frequency (in Q11).
+// postLocSnr is the post SNR for each frequency (in Q11).
+void WebRtcNsx_SpeechNoiseProb(NsxInst_t* inst,
+ uint16_t* nonSpeechProbFinal,
+ uint32_t* priorLocSnr,
+ uint32_t* postLocSnr) {
+
+ uint32_t tmpU32no1, tmpU32no2, tmpU32no3;
+ int32_t indPriorFX, tmp32no1;
+ int32_t logLrtTimeAvgKsumFX;
+ int16_t indPriorFX16;
+ int16_t tmp16, tmp16no1, tmp16no2, tmpIndFX, tableIndex, frac;
+ int i, normTmp, nShifts;
+
+ int32_t r0, r1, r2, r3, r4, r5, r6, r7, r8, r9;
+ int32_t const_max = 0x7fffffff;
+ int32_t const_neg43 = -43;
+ int32_t const_5412 = 5412;
+ int32_t const_11rsh12 = (11 << 12);
+ int32_t const_178 = 178;
+
+
+  // compute the feature based on the average LR factor
+  // this is the average over all frequencies of the smoothed log LRT
+ logLrtTimeAvgKsumFX = 0;
+ for (i = 0; i < inst->magnLen; i++) {
+ r0 = postLocSnr[i]; // Q11
+ r1 = priorLocSnr[i];
+ r2 = inst->logLrtTimeAvgW32[i];
+
+ __asm __volatile(
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "clz %[r3], %[r0] \n\t"
+ "clz %[r5], %[r1] \n\t"
+ "slti %[r4], %[r3], 32 \n\t"
+ "slti %[r6], %[r5], 32 \n\t"
+ "movz %[r3], $0, %[r4] \n\t"
+ "movz %[r5], $0, %[r6] \n\t"
+ "slti %[r4], %[r3], 11 \n\t"
+ "addiu %[r6], %[r3], -11 \n\t"
+ "neg %[r7], %[r6] \n\t"
+ "sllv %[r6], %[r1], %[r6] \n\t"
+ "srav %[r7], %[r1], %[r7] \n\t"
+ "movn %[r6], %[r7], %[r4] \n\t"
+ "sllv %[r1], %[r1], %[r5] \n\t"
+ "and %[r1], %[r1], %[const_max] \n\t"
+ "sra %[r1], %[r1], 19 \n\t"
+ "mul %[r7], %[r1], %[r1] \n\t"
+ "sllv %[r3], %[r0], %[r3] \n\t"
+ "divu %[r8], %[r3], %[r6] \n\t"
+ "slti %[r6], %[r6], 1 \n\t"
+ "mul %[r7], %[r7], %[const_neg43] \n\t"
+ "sra %[r7], %[r7], 19 \n\t"
+ "movz %[r3], %[r8], %[r6] \n\t"
+ "subu %[r0], %[r0], %[r3] \n\t"
+ "mul %[r1], %[r1], %[const_5412] \n\t"
+ "sra %[r1], %[r1], 12 \n\t"
+ "addu %[r7], %[r7], %[r1] \n\t"
+ "addiu %[r1], %[r7], 37 \n\t"
+ "addiu %[r5], %[r5], -31 \n\t"
+ "neg %[r5], %[r5] \n\t"
+ "sll %[r5], %[r5], 12 \n\t"
+ "addu %[r5], %[r5], %[r1] \n\t"
+ "subu %[r7], %[r5], %[const_11rsh12] \n\t"
+ "mul %[r7], %[r7], %[const_178] \n\t"
+ "sra %[r7], %[r7], 8 \n\t"
+ "addu %[r7], %[r7], %[r2] \n\t"
+ "sra %[r7], %[r7], 1 \n\t"
+ "subu %[r2], %[r2], %[r7] \n\t"
+ "addu %[r2], %[r2], %[r0] \n\t"
+ ".set pop \n\t"
+ : [r0] "+r" (r0), [r1] "+r" (r1), [r2] "+r" (r2),
+ [r3] "=&r" (r3), [r4] "=&r" (r4), [r5] "=&r" (r5),
+ [r6] "=&r" (r6), [r7] "=&r" (r7), [r8] "=&r" (r8)
+ : [const_max] "r" (const_max), [const_neg43] "r" (const_neg43),
+ [const_5412] "r" (const_5412), [const_11rsh12] "r" (const_11rsh12),
+ [const_178] "r" (const_178)
+ : "hi", "lo"
+ );
+ inst->logLrtTimeAvgW32[i] = r2;
+ logLrtTimeAvgKsumFX += r2;
+ }
+
+ inst->featureLogLrt = WEBRTC_SPL_RSHIFT_W32(logLrtTimeAvgKsumFX * 5,
+ inst->stages + 10);
+ // 5 = BIN_SIZE_LRT / 2
+ // done with computation of LR factor
+
+ //
+ // compute the indicator functions
+ //
+
+ // average LRT feature
+ // FLOAT code
+ // indicator0 = 0.5 * (tanh(widthPrior *
+ // (logLrtTimeAvgKsum - threshPrior0)) + 1.0);
+ tmpIndFX = 16384; // Q14(1.0)
+ tmp32no1 = logLrtTimeAvgKsumFX - inst->thresholdLogLrt; // Q12
+ nShifts = 7 - inst->stages; // WIDTH_PR_MAP_SHIFT - inst->stages + 5;
+ //use larger width in tanh map for pause regions
+ if (tmp32no1 < 0) {
+ tmpIndFX = 0;
+ tmp32no1 = -tmp32no1;
+ //widthPrior = widthPrior * 2.0;
+ nShifts++;
+ }
+ tmp32no1 = WEBRTC_SPL_SHIFT_W32(tmp32no1, nShifts); // Q14
+ // compute indicator function: sigmoid map
+ tableIndex = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32no1, 14);
+ if ((tableIndex < 16) && (tableIndex >= 0)) {
+ tmp16no2 = kIndicatorTable[tableIndex];
+ tmp16no1 = kIndicatorTable[tableIndex + 1] - kIndicatorTable[tableIndex];
+ frac = (int16_t)(tmp32no1 & 0x00003fff); // Q14
+ tmp16no2 += (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(tmp16no1, frac, 14);
+ if (tmpIndFX == 0) {
+ tmpIndFX = 8192 - tmp16no2; // Q14
+ } else {
+ tmpIndFX = 8192 + tmp16no2; // Q14
+ }
+ }
+ indPriorFX = WEBRTC_SPL_MUL_16_16(inst->weightLogLrt, tmpIndFX); // 6*Q14
+
+  // spectral flatness feature
+ if (inst->weightSpecFlat) {
+ tmpU32no1 = WEBRTC_SPL_UMUL(inst->featureSpecFlat, 400); // Q10
+ tmpIndFX = 16384; // Q14(1.0)
+ //use larger width in tanh map for pause regions
+ tmpU32no2 = inst->thresholdSpecFlat - tmpU32no1; //Q10
+ nShifts = 4;
+ if (inst->thresholdSpecFlat < tmpU32no1) {
+ tmpIndFX = 0;
+ tmpU32no2 = tmpU32no1 - inst->thresholdSpecFlat;
+ //widthPrior = widthPrior * 2.0;
+ nShifts++;
+ }
+    tmpU32no1 = WebRtcSpl_DivU32U16(WEBRTC_SPL_LSHIFT_U32(tmpU32no2, nShifts),
+                                    25);  // Q14
+ // compute indicator function: sigmoid map
+ // FLOAT code
+ // indicator1 = 0.5 * (tanh(sgnMap * widthPrior *
+ // (threshPrior1 - tmpFloat1)) + 1.0);
+ tableIndex = (int16_t)WEBRTC_SPL_RSHIFT_U32(tmpU32no1, 14);
+ if (tableIndex < 16) {
+ tmp16no2 = kIndicatorTable[tableIndex];
+ tmp16no1 = kIndicatorTable[tableIndex + 1] - kIndicatorTable[tableIndex];
+ frac = (int16_t)(tmpU32no1 & 0x00003fff); // Q14
+ tmp16no2 += (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(tmp16no1, frac, 14);
+ if (tmpIndFX) {
+ tmpIndFX = 8192 + tmp16no2; // Q14
+ } else {
+ tmpIndFX = 8192 - tmp16no2; // Q14
+ }
+ }
+ indPriorFX += WEBRTC_SPL_MUL_16_16(inst->weightSpecFlat, tmpIndFX); // 6*Q14
+ }
+
+  // template spectral-difference feature
+ if (inst->weightSpecDiff) {
+ tmpU32no1 = 0;
+ if (inst->featureSpecDiff) {
+ normTmp = WEBRTC_SPL_MIN(20 - inst->stages,
+ WebRtcSpl_NormU32(inst->featureSpecDiff));
+ tmpU32no1 = WEBRTC_SPL_LSHIFT_U32(inst->featureSpecDiff, normTmp);
+ // Q(normTmp-2*stages)
+ tmpU32no2 = WEBRTC_SPL_RSHIFT_U32(inst->timeAvgMagnEnergy,
+ 20 - inst->stages - normTmp);
+ if (tmpU32no2 > 0) {
+ // Q(20 - inst->stages)
+ tmpU32no1 = WEBRTC_SPL_UDIV(tmpU32no1, tmpU32no2);
+ } else {
+ tmpU32no1 = (uint32_t)(0x7fffffff);
+ }
+ }
+ tmpU32no3 = WEBRTC_SPL_UDIV(WEBRTC_SPL_LSHIFT_U32(inst->thresholdSpecDiff,
+ 17),
+ 25);
+ tmpU32no2 = tmpU32no1 - tmpU32no3;
+ nShifts = 1;
+ tmpIndFX = 16384; // Q14(1.0)
+ //use larger width in tanh map for pause regions
+ if (tmpU32no2 & 0x80000000) {
+ tmpIndFX = 0;
+ tmpU32no2 = tmpU32no3 - tmpU32no1;
+ //widthPrior = widthPrior * 2.0;
+ nShifts--;
+ }
+ tmpU32no1 = WEBRTC_SPL_RSHIFT_U32(tmpU32no2, nShifts);
+ // compute indicator function: sigmoid map
+ /* FLOAT code
+ indicator2 = 0.5 * (tanh(widthPrior * (tmpFloat1 - threshPrior2)) + 1.0);
+ */
+ tableIndex = (int16_t)WEBRTC_SPL_RSHIFT_U32(tmpU32no1, 14);
+ if (tableIndex < 16) {
+ tmp16no2 = kIndicatorTable[tableIndex];
+ tmp16no1 = kIndicatorTable[tableIndex + 1] - kIndicatorTable[tableIndex];
+ frac = (int16_t)(tmpU32no1 & 0x00003fff); // Q14
+ tmp16no2 += (int16_t)WEBRTC_SPL_MUL_16_16_RSFT_WITH_ROUND(
+ tmp16no1, frac, 14);
+ if (tmpIndFX) {
+ tmpIndFX = 8192 + tmp16no2;
+ } else {
+ tmpIndFX = 8192 - tmp16no2;
+ }
+ }
+ indPriorFX += WEBRTC_SPL_MUL_16_16(inst->weightSpecDiff, tmpIndFX); // 6*Q14
+ }
+
+  // combine the indicator functions with the feature weights
+ // FLOAT code
+ // indPrior = 1 - (weightIndPrior0 * indicator0 + weightIndPrior1 *
+ // indicator1 + weightIndPrior2 * indicator2);
+ indPriorFX16 = WebRtcSpl_DivW32W16ResW16(98307 - indPriorFX, 6); // Q14
+ // done with computing indicator function
+
+  // compute the prior probability
+ // FLOAT code
+ // inst->priorNonSpeechProb += PRIOR_UPDATE *
+ // (indPriorNonSpeech - inst->priorNonSpeechProb);
+ tmp16 = indPriorFX16 - inst->priorNonSpeechProb; // Q14
+ inst->priorNonSpeechProb += (int16_t)WEBRTC_SPL_MUL_16_16_RSFT(
+ PRIOR_UPDATE_Q14, tmp16, 14); // Q14
+
+  // final speech probability: combine the prior model with the LR factor
+
+ memset(nonSpeechProbFinal, 0, sizeof(uint16_t) * inst->magnLen);
+
+ if (inst->priorNonSpeechProb > 0) {
+ r0 = inst->priorNonSpeechProb;
+ r1 = 16384 - r0;
+ int32_t const_23637 = 23637;
+ int32_t const_44 = 44;
+ int32_t const_84 = 84;
+ int32_t const_1 = 1;
+ int32_t const_neg8 = -8;
+ for (i = 0; i < inst->magnLen; i++) {
+ r2 = inst->logLrtTimeAvgW32[i];
+ if (r2 < 65300) {
+ __asm __volatile(
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "mul %[r2], %[r2], %[const_23637] \n\t"
+ "sll %[r6], %[r1], 16 \n\t"
+ "clz %[r7], %[r6] \n\t"
+ "clo %[r8], %[r6] \n\t"
+ "slt %[r9], %[r6], $0 \n\t"
+ "movn %[r7], %[r8], %[r9] \n\t"
+ "sra %[r2], %[r2], 14 \n\t"
+ "andi %[r3], %[r2], 0xfff \n\t"
+ "mul %[r4], %[r3], %[r3] \n\t"
+ "mul %[r3], %[r3], %[const_84] \n\t"
+ "sra %[r2], %[r2], 12 \n\t"
+ "slt %[r5], %[r2], %[const_neg8] \n\t"
+ "movn %[r2], %[const_neg8], %[r5] \n\t"
+ "mul %[r4], %[r4], %[const_44] \n\t"
+ "sra %[r3], %[r3], 7 \n\t"
+ "addiu %[r7], %[r7], -1 \n\t"
+ "slti %[r9], %[r7], 31 \n\t"
+ "movz %[r7], $0, %[r9] \n\t"
+ "sra %[r4], %[r4], 19 \n\t"
+ "addu %[r4], %[r4], %[r3] \n\t"
+ "addiu %[r3], %[r2], 8 \n\t"
+ "addiu %[r2], %[r2], -4 \n\t"
+ "neg %[r5], %[r2] \n\t"
+ "sllv %[r6], %[r4], %[r2] \n\t"
+ "srav %[r5], %[r4], %[r5] \n\t"
+ "slt %[r2], %[r2], $0 \n\t"
+ "movn %[r6], %[r5], %[r2] \n\t"
+ "sllv %[r3], %[const_1], %[r3] \n\t"
+ "addu %[r2], %[r3], %[r6] \n\t"
+ "clz %[r4], %[r2] \n\t"
+ "clo %[r5], %[r2] \n\t"
+ "slt %[r8], %[r2], $0 \n\t"
+ "movn %[r4], %[r5], %[r8] \n\t"
+ "addiu %[r4], %[r4], -1 \n\t"
+ "slt %[r5], $0, %[r2] \n\t"
+ "or %[r5], %[r5], %[r7] \n\t"
+ "movz %[r4], $0, %[r5] \n\t"
+ "addiu %[r6], %[r7], -7 \n\t"
+ "addu %[r6], %[r6], %[r4] \n\t"
+ "bltz %[r6], 1f \n\t"
+ " nop \n\t"
+ "addiu %[r4], %[r6], -8 \n\t"
+ "neg %[r3], %[r4] \n\t"
+ "srav %[r5], %[r2], %[r3] \n\t"
+ "mul %[r5], %[r5], %[r1] \n\t"
+ "mul %[r2], %[r2], %[r1] \n\t"
+ "slt %[r4], %[r4], $0 \n\t"
+ "srav %[r5], %[r5], %[r6] \n\t"
+ "sra %[r2], %[r2], 8 \n\t"
+ "movn %[r2], %[r5], %[r4] \n\t"
+ "sll %[r3], %[r0], 8 \n\t"
+ "addu %[r2], %[r0], %[r2] \n\t"
+ "divu %[r3], %[r3], %[r2] \n\t"
+ "1: \n\t"
+ ".set pop \n\t"
+ : [r2] "+r" (r2), [r3] "=&r" (r3), [r4] "=&r" (r4),
+ [r5] "=&r" (r5), [r6] "=&r" (r6), [r7] "=&r" (r7),
+ [r8] "=&r" (r8), [r9] "=&r" (r9)
+ : [r0] "r" (r0), [r1] "r" (r1), [const_23637] "r" (const_23637),
+ [const_neg8] "r" (const_neg8), [const_84] "r" (const_84),
+ [const_1] "r" (const_1), [const_44] "r" (const_44)
+ : "hi", "lo"
+ );
+ nonSpeechProbFinal[i] = r3;
+ }
+ }
+ }
+}
+
+// Update analysis buffer for lower band, and window data before FFT.
+void WebRtcNsx_AnalysisUpdate_mips(NsxInst_t* inst,
+ int16_t* out,
+ int16_t* new_speech) {
+
+ int iters, after;
+ int anaLen = inst->anaLen;
+ int *window = (int*)inst->window;
+ int *anaBuf = (int*)inst->analysisBuffer;
+ int *outBuf = (int*)out;
+ int r0, r1, r2, r3, r4, r5, r6, r7;
+#if defined(MIPS_DSP_R1_LE)
+ int r8;
+#endif
+
+ // For lower band update analysis buffer.
+ WEBRTC_SPL_MEMCPY_W16(inst->analysisBuffer,
+ inst->analysisBuffer + inst->blockLen10ms,
+ inst->anaLen - inst->blockLen10ms);
+ WEBRTC_SPL_MEMCPY_W16(inst->analysisBuffer
+ + inst->anaLen - inst->blockLen10ms, new_speech, inst->blockLen10ms);
+
+ // Window data before FFT.
+#if defined(MIPS_DSP_R1_LE)
+ __asm __volatile(
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "sra %[iters], %[anaLen], 3 \n\t"
+ "1: \n\t"
+ "blez %[iters], 2f \n\t"
+ " nop \n\t"
+ "lw %[r0], 0(%[window]) \n\t"
+ "lw %[r1], 0(%[anaBuf]) \n\t"
+ "lw %[r2], 4(%[window]) \n\t"
+ "lw %[r3], 4(%[anaBuf]) \n\t"
+ "lw %[r4], 8(%[window]) \n\t"
+ "lw %[r5], 8(%[anaBuf]) \n\t"
+ "lw %[r6], 12(%[window]) \n\t"
+ "lw %[r7], 12(%[anaBuf]) \n\t"
+ "muleq_s.w.phl %[r8], %[r0], %[r1] \n\t"
+ "muleq_s.w.phr %[r0], %[r0], %[r1] \n\t"
+ "muleq_s.w.phl %[r1], %[r2], %[r3] \n\t"
+ "muleq_s.w.phr %[r2], %[r2], %[r3] \n\t"
+ "muleq_s.w.phl %[r3], %[r4], %[r5] \n\t"
+ "muleq_s.w.phr %[r4], %[r4], %[r5] \n\t"
+ "muleq_s.w.phl %[r5], %[r6], %[r7] \n\t"
+ "muleq_s.w.phr %[r6], %[r6], %[r7] \n\t"
+#if defined(MIPS_DSP_R2_LE)
+ "precr_sra_r.ph.w %[r8], %[r0], 15 \n\t"
+ "precr_sra_r.ph.w %[r1], %[r2], 15 \n\t"
+ "precr_sra_r.ph.w %[r3], %[r4], 15 \n\t"
+ "precr_sra_r.ph.w %[r5], %[r6], 15 \n\t"
+ "sw %[r8], 0(%[outBuf]) \n\t"
+ "sw %[r1], 4(%[outBuf]) \n\t"
+ "sw %[r3], 8(%[outBuf]) \n\t"
+ "sw %[r5], 12(%[outBuf]) \n\t"
+#else
+ "shra_r.w %[r8], %[r8], 15 \n\t"
+ "shra_r.w %[r0], %[r0], 15 \n\t"
+ "shra_r.w %[r1], %[r1], 15 \n\t"
+ "shra_r.w %[r2], %[r2], 15 \n\t"
+ "shra_r.w %[r3], %[r3], 15 \n\t"
+ "shra_r.w %[r4], %[r4], 15 \n\t"
+ "shra_r.w %[r5], %[r5], 15 \n\t"
+ "shra_r.w %[r6], %[r6], 15 \n\t"
+ "sll %[r0], %[r0], 16 \n\t"
+ "sll %[r2], %[r2], 16 \n\t"
+ "sll %[r4], %[r4], 16 \n\t"
+ "sll %[r6], %[r6], 16 \n\t"
+ "packrl.ph %[r0], %[r8], %[r0] \n\t"
+ "packrl.ph %[r2], %[r1], %[r2] \n\t"
+ "packrl.ph %[r4], %[r3], %[r4] \n\t"
+ "packrl.ph %[r6], %[r5], %[r6] \n\t"
+ "sw %[r0], 0(%[outBuf]) \n\t"
+ "sw %[r2], 4(%[outBuf]) \n\t"
+ "sw %[r4], 8(%[outBuf]) \n\t"
+ "sw %[r6], 12(%[outBuf]) \n\t"
+#endif
+ "addiu %[window], %[window], 16 \n\t"
+ "addiu %[anaBuf], %[anaBuf], 16 \n\t"
+ "addiu %[outBuf], %[outBuf], 16 \n\t"
+ "b 1b \n\t"
+ " addiu %[iters], %[iters], -1 \n\t"
+ "2: \n\t"
+ "andi %[after], %[anaLen], 7 \n\t"
+ "3: \n\t"
+ "blez %[after], 4f \n\t"
+ " nop \n\t"
+ "lh %[r0], 0(%[window]) \n\t"
+ "lh %[r1], 0(%[anaBuf]) \n\t"
+ "mul %[r0], %[r0], %[r1] \n\t"
+ "addiu %[window], %[window], 2 \n\t"
+ "addiu %[anaBuf], %[anaBuf], 2 \n\t"
+ "addiu %[outBuf], %[outBuf], 2 \n\t"
+ "shra_r.w %[r0], %[r0], 14 \n\t"
+ "sh %[r0], -2(%[outBuf]) \n\t"
+ "b 3b \n\t"
+ " addiu %[after], %[after], -1 \n\t"
+ "4: \n\t"
+ ".set pop \n\t"
+ : [r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2),
+ [r3] "=&r" (r3), [r4] "=&r" (r4), [r5] "=&r" (r5),
+ [r6] "=&r" (r6), [r7] "=&r" (r7), [r8] "=&r" (r8),
+ [iters] "=&r" (iters), [after] "=&r" (after),
+ [window] "+r" (window),[anaBuf] "+r" (anaBuf),
+ [outBuf] "+r" (outBuf)
+ : [anaLen] "r" (anaLen)
+ : "memory", "hi", "lo"
+ );
+#else
+ __asm __volatile(
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "sra %[iters], %[anaLen], 2 \n\t"
+ "1: \n\t"
+ "blez %[iters], 2f \n\t"
+ " nop \n\t"
+ "lh %[r0], 0(%[window]) \n\t"
+ "lh %[r1], 0(%[anaBuf]) \n\t"
+ "lh %[r2], 2(%[window]) \n\t"
+ "lh %[r3], 2(%[anaBuf]) \n\t"
+ "lh %[r4], 4(%[window]) \n\t"
+ "lh %[r5], 4(%[anaBuf]) \n\t"
+ "lh %[r6], 6(%[window]) \n\t"
+ "lh %[r7], 6(%[anaBuf]) \n\t"
+ "mul %[r0], %[r0], %[r1] \n\t"
+ "mul %[r2], %[r2], %[r3] \n\t"
+ "mul %[r4], %[r4], %[r5] \n\t"
+ "mul %[r6], %[r6], %[r7] \n\t"
+ "addiu %[window], %[window], 8 \n\t"
+ "addiu %[anaBuf], %[anaBuf], 8 \n\t"
+ "addiu %[r0], %[r0], 0x2000 \n\t"
+ "addiu %[r2], %[r2], 0x2000 \n\t"
+ "addiu %[r4], %[r4], 0x2000 \n\t"
+ "addiu %[r6], %[r6], 0x2000 \n\t"
+ "sra %[r0], %[r0], 14 \n\t"
+ "sra %[r2], %[r2], 14 \n\t"
+ "sra %[r4], %[r4], 14 \n\t"
+ "sra %[r6], %[r6], 14 \n\t"
+ "sh %[r0], 0(%[outBuf]) \n\t"
+ "sh %[r2], 2(%[outBuf]) \n\t"
+ "sh %[r4], 4(%[outBuf]) \n\t"
+ "sh %[r6], 6(%[outBuf]) \n\t"
+ "addiu %[outBuf], %[outBuf], 8 \n\t"
+ "b 1b \n\t"
+ " addiu %[iters], %[iters], -1 \n\t"
+ "2: \n\t"
+ "andi %[after], %[anaLen], 3 \n\t"
+ "3: \n\t"
+ "blez %[after], 4f \n\t"
+ " nop \n\t"
+ "lh %[r0], 0(%[window]) \n\t"
+ "lh %[r1], 0(%[anaBuf]) \n\t"
+ "mul %[r0], %[r0], %[r1] \n\t"
+ "addiu %[window], %[window], 2 \n\t"
+ "addiu %[anaBuf], %[anaBuf], 2 \n\t"
+ "addiu %[outBuf], %[outBuf], 2 \n\t"
+ "addiu %[r0], %[r0], 0x2000 \n\t"
+ "sra %[r0], %[r0], 14 \n\t"
+ "sh %[r0], -2(%[outBuf]) \n\t"
+ "b 3b \n\t"
+ " addiu %[after], %[after], -1 \n\t"
+ "4: \n\t"
+ ".set pop \n\t"
+ : [r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2),
+ [r3] "=&r" (r3), [r4] "=&r" (r4), [r5] "=&r" (r5),
+ [r6] "=&r" (r6), [r7] "=&r" (r7), [iters] "=&r" (iters),
+ [after] "=&r" (after), [window] "+r" (window),
+ [anaBuf] "+r" (anaBuf), [outBuf] "+r" (outBuf)
+ : [anaLen] "r" (anaLen)
+ : "memory", "hi", "lo"
+ );
+#endif
+}
+
+// Synthesis for the noise suppression process: read out the fully processed
+// segment, and update the synthesis buffer.
+void WebRtcNsx_SynthesisUpdate_mips(NsxInst_t* inst,
+ int16_t* out_frame,
+ int16_t gain_factor) {
+
+ int iters = inst->blockLen10ms >> 2;
+ int after = inst->blockLen10ms & 3;
+ int r0, r1, r2, r3, r4, r5, r6, r7;
+ int16_t *window = (int16_t*)inst->window;
+ int16_t *real = inst->real;
+ int16_t *synthBuf = inst->synthesisBuffer;
+ int16_t *out = out_frame;
+ int sat_pos = 0x7fff;
+ int sat_neg = 0xffff8000;
+ int block10 = (int)inst->blockLen10ms;
+ int anaLen = (int)inst->anaLen;
+
+ __asm __volatile(
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "1: \n\t"
+ "blez %[iters], 2f \n\t"
+ " nop \n\t"
+ "lh %[r0], 0(%[window]) \n\t"
+ "lh %[r1], 0(%[real]) \n\t"
+ "lh %[r2], 2(%[window]) \n\t"
+ "lh %[r3], 2(%[real]) \n\t"
+ "lh %[r4], 4(%[window]) \n\t"
+ "lh %[r5], 4(%[real]) \n\t"
+ "lh %[r6], 6(%[window]) \n\t"
+ "lh %[r7], 6(%[real]) \n\t"
+ "mul %[r0], %[r0], %[r1] \n\t"
+ "mul %[r2], %[r2], %[r3] \n\t"
+ "mul %[r4], %[r4], %[r5] \n\t"
+ "mul %[r6], %[r6], %[r7] \n\t"
+ "addiu %[r0], %[r0], 0x2000 \n\t"
+ "addiu %[r2], %[r2], 0x2000 \n\t"
+ "addiu %[r4], %[r4], 0x2000 \n\t"
+ "addiu %[r6], %[r6], 0x2000 \n\t"
+ "sra %[r0], %[r0], 14 \n\t"
+ "sra %[r2], %[r2], 14 \n\t"
+ "sra %[r4], %[r4], 14 \n\t"
+ "sra %[r6], %[r6], 14 \n\t"
+ "mul %[r0], %[r0], %[gain_factor] \n\t"
+ "mul %[r2], %[r2], %[gain_factor] \n\t"
+ "mul %[r4], %[r4], %[gain_factor] \n\t"
+ "mul %[r6], %[r6], %[gain_factor] \n\t"
+ "addiu %[r0], %[r0], 0x1000 \n\t"
+ "addiu %[r2], %[r2], 0x1000 \n\t"
+ "addiu %[r4], %[r4], 0x1000 \n\t"
+ "addiu %[r6], %[r6], 0x1000 \n\t"
+ "sra %[r0], %[r0], 13 \n\t"
+ "sra %[r2], %[r2], 13 \n\t"
+ "sra %[r4], %[r4], 13 \n\t"
+ "sra %[r6], %[r6], 13 \n\t"
+ "slt %[r1], %[r0], %[sat_pos] \n\t"
+ "slt %[r3], %[r2], %[sat_pos] \n\t"
+ "slt %[r5], %[r4], %[sat_pos] \n\t"
+ "slt %[r7], %[r6], %[sat_pos] \n\t"
+ "movz %[r0], %[sat_pos], %[r1] \n\t"
+ "movz %[r2], %[sat_pos], %[r3] \n\t"
+ "movz %[r4], %[sat_pos], %[r5] \n\t"
+ "movz %[r6], %[sat_pos], %[r7] \n\t"
+ "lh %[r1], 0(%[synthBuf]) \n\t"
+ "lh %[r3], 2(%[synthBuf]) \n\t"
+ "lh %[r5], 4(%[synthBuf]) \n\t"
+ "lh %[r7], 6(%[synthBuf]) \n\t"
+ "addu %[r0], %[r0], %[r1] \n\t"
+ "addu %[r2], %[r2], %[r3] \n\t"
+ "addu %[r4], %[r4], %[r5] \n\t"
+ "addu %[r6], %[r6], %[r7] \n\t"
+ "slt %[r1], %[r0], %[sat_pos] \n\t"
+ "slt %[r3], %[r2], %[sat_pos] \n\t"
+ "slt %[r5], %[r4], %[sat_pos] \n\t"
+ "slt %[r7], %[r6], %[sat_pos] \n\t"
+ "movz %[r0], %[sat_pos], %[r1] \n\t"
+ "movz %[r2], %[sat_pos], %[r3] \n\t"
+ "movz %[r4], %[sat_pos], %[r5] \n\t"
+ "movz %[r6], %[sat_pos], %[r7] \n\t"
+ "slt %[r1], %[r0], %[sat_neg] \n\t"
+ "slt %[r3], %[r2], %[sat_neg] \n\t"
+ "slt %[r5], %[r4], %[sat_neg] \n\t"
+ "slt %[r7], %[r6], %[sat_neg] \n\t"
+ "movn %[r0], %[sat_neg], %[r1] \n\t"
+ "movn %[r2], %[sat_neg], %[r3] \n\t"
+ "movn %[r4], %[sat_neg], %[r5] \n\t"
+ "movn %[r6], %[sat_neg], %[r7] \n\t"
+ "sh %[r0], 0(%[synthBuf]) \n\t"
+ "sh %[r2], 2(%[synthBuf]) \n\t"
+ "sh %[r4], 4(%[synthBuf]) \n\t"
+ "sh %[r6], 6(%[synthBuf]) \n\t"
+ "sh %[r0], 0(%[out]) \n\t"
+ "sh %[r2], 2(%[out]) \n\t"
+ "sh %[r4], 4(%[out]) \n\t"
+ "sh %[r6], 6(%[out]) \n\t"
+ "addiu %[window], %[window], 8 \n\t"
+ "addiu %[real], %[real], 8 \n\t"
+ "addiu %[synthBuf],%[synthBuf], 8 \n\t"
+ "addiu %[out], %[out], 8 \n\t"
+ "b 1b \n\t"
+ " addiu %[iters], %[iters], -1 \n\t"
+ "2: \n\t"
+ "blez %[after], 3f \n\t"
+ " subu %[block10], %[anaLen], %[block10] \n\t"
+ "lh %[r0], 0(%[window]) \n\t"
+ "lh %[r1], 0(%[real]) \n\t"
+ "mul %[r0], %[r0], %[r1] \n\t"
+ "addiu %[window], %[window], 2 \n\t"
+ "addiu %[real], %[real], 2 \n\t"
+ "addiu %[r0], %[r0], 0x2000 \n\t"
+ "sra %[r0], %[r0], 14 \n\t"
+ "mul %[r0], %[r0], %[gain_factor] \n\t"
+ "addiu %[r0], %[r0], 0x1000 \n\t"
+ "sra %[r0], %[r0], 13 \n\t"
+ "slt %[r1], %[r0], %[sat_pos] \n\t"
+ "movz %[r0], %[sat_pos], %[r1] \n\t"
+ "lh %[r1], 0(%[synthBuf]) \n\t"
+ "addu %[r0], %[r0], %[r1] \n\t"
+ "slt %[r1], %[r0], %[sat_pos] \n\t"
+ "movz %[r0], %[sat_pos], %[r1] \n\t"
+ "slt %[r1], %[r0], %[sat_neg] \n\t"
+ "movn %[r0], %[sat_neg], %[r1] \n\t"
+ "sh %[r0], 0(%[synthBuf]) \n\t"
+ "sh %[r0], 0(%[out]) \n\t"
+ "addiu %[synthBuf],%[synthBuf], 2 \n\t"
+ "addiu %[out], %[out], 2 \n\t"
+ "b 2b \n\t"
+ " addiu %[after], %[after], -1 \n\t"
+ "3: \n\t"
+ "sra %[iters], %[block10], 2 \n\t"
+ "4: \n\t"
+ "blez %[iters], 5f \n\t"
+ " andi %[after], %[block10], 3 \n\t"
+ "lh %[r0], 0(%[window]) \n\t"
+ "lh %[r1], 0(%[real]) \n\t"
+ "lh %[r2], 2(%[window]) \n\t"
+ "lh %[r3], 2(%[real]) \n\t"
+ "lh %[r4], 4(%[window]) \n\t"
+ "lh %[r5], 4(%[real]) \n\t"
+ "lh %[r6], 6(%[window]) \n\t"
+ "lh %[r7], 6(%[real]) \n\t"
+ "mul %[r0], %[r0], %[r1] \n\t"
+ "mul %[r2], %[r2], %[r3] \n\t"
+ "mul %[r4], %[r4], %[r5] \n\t"
+ "mul %[r6], %[r6], %[r7] \n\t"
+ "addiu %[r0], %[r0], 0x2000 \n\t"
+ "addiu %[r2], %[r2], 0x2000 \n\t"
+ "addiu %[r4], %[r4], 0x2000 \n\t"
+ "addiu %[r6], %[r6], 0x2000 \n\t"
+ "sra %[r0], %[r0], 14 \n\t"
+ "sra %[r2], %[r2], 14 \n\t"
+ "sra %[r4], %[r4], 14 \n\t"
+ "sra %[r6], %[r6], 14 \n\t"
+ "mul %[r0], %[r0], %[gain_factor] \n\t"
+ "mul %[r2], %[r2], %[gain_factor] \n\t"
+ "mul %[r4], %[r4], %[gain_factor] \n\t"
+ "mul %[r6], %[r6], %[gain_factor] \n\t"
+ "addiu %[r0], %[r0], 0x1000 \n\t"
+ "addiu %[r2], %[r2], 0x1000 \n\t"
+ "addiu %[r4], %[r4], 0x1000 \n\t"
+ "addiu %[r6], %[r6], 0x1000 \n\t"
+ "sra %[r0], %[r0], 13 \n\t"
+ "sra %[r2], %[r2], 13 \n\t"
+ "sra %[r4], %[r4], 13 \n\t"
+ "sra %[r6], %[r6], 13 \n\t"
+ "slt %[r1], %[r0], %[sat_pos] \n\t"
+ "slt %[r3], %[r2], %[sat_pos] \n\t"
+ "slt %[r5], %[r4], %[sat_pos] \n\t"
+ "slt %[r7], %[r6], %[sat_pos] \n\t"
+ "movz %[r0], %[sat_pos], %[r1] \n\t"
+ "movz %[r2], %[sat_pos], %[r3] \n\t"
+ "movz %[r4], %[sat_pos], %[r5] \n\t"
+ "movz %[r6], %[sat_pos], %[r7] \n\t"
+ "lh %[r1], 0(%[synthBuf]) \n\t"
+ "lh %[r3], 2(%[synthBuf]) \n\t"
+ "lh %[r5], 4(%[synthBuf]) \n\t"
+ "lh %[r7], 6(%[synthBuf]) \n\t"
+ "addu %[r0], %[r0], %[r1] \n\t"
+ "addu %[r2], %[r2], %[r3] \n\t"
+ "addu %[r4], %[r4], %[r5] \n\t"
+ "addu %[r6], %[r6], %[r7] \n\t"
+ "slt %[r1], %[r0], %[sat_pos] \n\t"
+ "slt %[r3], %[r2], %[sat_pos] \n\t"
+ "slt %[r5], %[r4], %[sat_pos] \n\t"
+ "slt %[r7], %[r6], %[sat_pos] \n\t"
+ "movz %[r0], %[sat_pos], %[r1] \n\t"
+ "movz %[r2], %[sat_pos], %[r3] \n\t"
+ "movz %[r4], %[sat_pos], %[r5] \n\t"
+ "movz %[r6], %[sat_pos], %[r7] \n\t"
+ "slt %[r1], %[r0], %[sat_neg] \n\t"
+ "slt %[r3], %[r2], %[sat_neg] \n\t"
+ "slt %[r5], %[r4], %[sat_neg] \n\t"
+ "slt %[r7], %[r6], %[sat_neg] \n\t"
+ "movn %[r0], %[sat_neg], %[r1] \n\t"
+ "movn %[r2], %[sat_neg], %[r3] \n\t"
+ "movn %[r4], %[sat_neg], %[r5] \n\t"
+ "movn %[r6], %[sat_neg], %[r7] \n\t"
+ "sh %[r0], 0(%[synthBuf]) \n\t"
+ "sh %[r2], 2(%[synthBuf]) \n\t"
+ "sh %[r4], 4(%[synthBuf]) \n\t"
+ "sh %[r6], 6(%[synthBuf]) \n\t"
+ "addiu %[window], %[window], 8 \n\t"
+ "addiu %[real], %[real], 8 \n\t"
+ "addiu %[synthBuf],%[synthBuf], 8 \n\t"
+ "b 4b \n\t"
+ " addiu %[iters], %[iters], -1 \n\t"
+ "5: \n\t"
+ "blez %[after], 6f \n\t"
+ " nop \n\t"
+ "lh %[r0], 0(%[window]) \n\t"
+ "lh %[r1], 0(%[real]) \n\t"
+ "mul %[r0], %[r0], %[r1] \n\t"
+ "addiu %[window], %[window], 2 \n\t"
+ "addiu %[real], %[real], 2 \n\t"
+ "addiu %[r0], %[r0], 0x2000 \n\t"
+ "sra %[r0], %[r0], 14 \n\t"
+ "mul %[r0], %[r0], %[gain_factor] \n\t"
+ "addiu %[r0], %[r0], 0x1000 \n\t"
+ "sra %[r0], %[r0], 13 \n\t"
+ "slt %[r1], %[r0], %[sat_pos] \n\t"
+ "movz %[r0], %[sat_pos], %[r1] \n\t"
+ "lh %[r1], 0(%[synthBuf]) \n\t"
+ "addu %[r0], %[r0], %[r1] \n\t"
+ "slt %[r1], %[r0], %[sat_pos] \n\t"
+ "movz %[r0], %[sat_pos], %[r1] \n\t"
+ "slt %[r1], %[r0], %[sat_neg] \n\t"
+ "movn %[r0], %[sat_neg], %[r1] \n\t"
+ "sh %[r0], 0(%[synthBuf]) \n\t"
+ "addiu %[synthBuf],%[synthBuf], 2 \n\t"
+ "b 2b \n\t"
+ " addiu %[after], %[after], -1 \n\t"
+ "6: \n\t"
+ ".set pop \n\t"
+ : [r0] "=&r" (r0), [r1] "=&r" (r1), [r2] "=&r" (r2),
+ [r3] "=&r" (r3), [r4] "=&r" (r4), [r5] "=&r" (r5),
+ [r6] "=&r" (r6), [r7] "=&r" (r7), [iters] "+r" (iters),
+ [after] "+r" (after), [block10] "+r" (block10),
+ [window] "+r" (window), [real] "+r" (real),
+ [synthBuf] "+r" (synthBuf), [out] "+r" (out)
+ : [gain_factor] "r" (gain_factor), [sat_pos] "r" (sat_pos),
+ [sat_neg] "r" (sat_neg), [anaLen] "r" (anaLen)
+ : "memory", "hi", "lo"
+ );
+
+ // update synthesis buffer
+ WEBRTC_SPL_MEMCPY_W16(inst->synthesisBuffer,
+ inst->synthesisBuffer + inst->blockLen10ms,
+ inst->anaLen - inst->blockLen10ms);
+ WebRtcSpl_ZerosArrayW16(inst->synthesisBuffer
+ + inst->anaLen - inst->blockLen10ms, inst->blockLen10ms);
+}
+
+// Filter the data in the frequency domain, and create the spectrum.
+void WebRtcNsx_PrepareSpectrum_mips(NsxInst_t* inst, int16_t* freq_buf) {
+
+ uint16_t *noiseSupFilter = inst->noiseSupFilter;
+ int16_t *real = inst->real;
+ int16_t *imag = inst->imag;
+ int32_t loop_count = 2;
+ int16_t tmp_1, tmp_2, tmp_3, tmp_4, tmp_5, tmp_6;
+ int16_t tmp16 = (inst->anaLen << 1) - 4;
+ int16_t* freq_buf_f = freq_buf;
+ int16_t* freq_buf_s = &freq_buf[tmp16];
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ //first sample
+ "lh %[tmp_1], 0(%[noiseSupFilter]) \n\t"
+ "lh %[tmp_2], 0(%[real]) \n\t"
+ "lh %[tmp_3], 0(%[imag]) \n\t"
+ "mul %[tmp_2], %[tmp_2], %[tmp_1] \n\t"
+ "mul %[tmp_3], %[tmp_3], %[tmp_1] \n\t"
+ "sra %[tmp_2], %[tmp_2], 14 \n\t"
+ "sra %[tmp_3], %[tmp_3], 14 \n\t"
+ "sh %[tmp_2], 0(%[real]) \n\t"
+ "sh %[tmp_3], 0(%[imag]) \n\t"
+ "negu %[tmp_3], %[tmp_3] \n\t"
+ "sh %[tmp_2], 0(%[freq_buf_f]) \n\t"
+ "sh %[tmp_3], 2(%[freq_buf_f]) \n\t"
+ "addiu %[real], %[real], 2 \n\t"
+ "addiu %[imag], %[imag], 2 \n\t"
+ "addiu %[noiseSupFilter], %[noiseSupFilter], 2 \n\t"
+ "addiu %[freq_buf_f], %[freq_buf_f], 4 \n\t"
+ "1: \n\t"
+ "lh %[tmp_1], 0(%[noiseSupFilter]) \n\t"
+ "lh %[tmp_2], 0(%[real]) \n\t"
+ "lh %[tmp_3], 0(%[imag]) \n\t"
+ "lh %[tmp_4], 2(%[noiseSupFilter]) \n\t"
+ "lh %[tmp_5], 2(%[real]) \n\t"
+ "lh %[tmp_6], 2(%[imag]) \n\t"
+ "mul %[tmp_2], %[tmp_2], %[tmp_1] \n\t"
+ "mul %[tmp_3], %[tmp_3], %[tmp_1] \n\t"
+ "mul %[tmp_5], %[tmp_5], %[tmp_4] \n\t"
+ "mul %[tmp_6], %[tmp_6], %[tmp_4] \n\t"
+ "addiu %[loop_count], %[loop_count], 2 \n\t"
+ "sra %[tmp_2], %[tmp_2], 14 \n\t"
+ "sra %[tmp_3], %[tmp_3], 14 \n\t"
+ "sra %[tmp_5], %[tmp_5], 14 \n\t"
+ "sra %[tmp_6], %[tmp_6], 14 \n\t"
+ "addiu %[noiseSupFilter], %[noiseSupFilter], 4 \n\t"
+ "sh %[tmp_2], 0(%[real]) \n\t"
+ "sh %[tmp_2], 4(%[freq_buf_s]) \n\t"
+ "sh %[tmp_3], 0(%[imag]) \n\t"
+ "sh %[tmp_3], 6(%[freq_buf_s]) \n\t"
+ "negu %[tmp_3], %[tmp_3] \n\t"
+ "sh %[tmp_5], 2(%[real]) \n\t"
+ "sh %[tmp_5], 0(%[freq_buf_s]) \n\t"
+ "sh %[tmp_6], 2(%[imag]) \n\t"
+ "sh %[tmp_6], 2(%[freq_buf_s]) \n\t"
+ "negu %[tmp_6], %[tmp_6] \n\t"
+ "addiu %[freq_buf_s], %[freq_buf_s], -8 \n\t"
+ "addiu %[real], %[real], 4 \n\t"
+ "addiu %[imag], %[imag], 4 \n\t"
+ "sh %[tmp_2], 0(%[freq_buf_f]) \n\t"
+ "sh %[tmp_3], 2(%[freq_buf_f]) \n\t"
+ "sh %[tmp_5], 4(%[freq_buf_f]) \n\t"
+ "sh %[tmp_6], 6(%[freq_buf_f]) \n\t"
+ "blt %[loop_count], %[loop_size], 1b \n\t"
+ " addiu %[freq_buf_f], %[freq_buf_f], 8 \n\t"
+ //last two samples:
+ "lh %[tmp_1], 0(%[noiseSupFilter]) \n\t"
+ "lh %[tmp_2], 0(%[real]) \n\t"
+ "lh %[tmp_3], 0(%[imag]) \n\t"
+ "lh %[tmp_4], 2(%[noiseSupFilter]) \n\t"
+ "lh %[tmp_5], 2(%[real]) \n\t"
+ "lh %[tmp_6], 2(%[imag]) \n\t"
+ "mul %[tmp_2], %[tmp_2], %[tmp_1] \n\t"
+ "mul %[tmp_3], %[tmp_3], %[tmp_1] \n\t"
+ "mul %[tmp_5], %[tmp_5], %[tmp_4] \n\t"
+ "mul %[tmp_6], %[tmp_6], %[tmp_4] \n\t"
+ "sra %[tmp_2], %[tmp_2], 14 \n\t"
+ "sra %[tmp_3], %[tmp_3], 14 \n\t"
+ "sra %[tmp_5], %[tmp_5], 14 \n\t"
+ "sra %[tmp_6], %[tmp_6], 14 \n\t"
+ "sh %[tmp_2], 0(%[real]) \n\t"
+ "sh %[tmp_2], 4(%[freq_buf_s]) \n\t"
+ "sh %[tmp_3], 0(%[imag]) \n\t"
+ "sh %[tmp_3], 6(%[freq_buf_s]) \n\t"
+ "negu %[tmp_3], %[tmp_3] \n\t"
+ "sh %[tmp_2], 0(%[freq_buf_f]) \n\t"
+ "sh %[tmp_3], 2(%[freq_buf_f]) \n\t"
+ "sh %[tmp_5], 4(%[freq_buf_f]) \n\t"
+ "sh %[tmp_6], 6(%[freq_buf_f]) \n\t"
+ "sh %[tmp_5], 2(%[real]) \n\t"
+ "sh %[tmp_6], 2(%[imag]) \n\t"
+ ".set pop \n\t"
+ : [real] "+r" (real), [imag] "+r" (imag),
+ [freq_buf_f] "+r" (freq_buf_f), [freq_buf_s] "+r" (freq_buf_s),
+ [loop_count] "+r" (loop_count), [noiseSupFilter] "+r" (noiseSupFilter),
+ [tmp_1] "=&r" (tmp_1), [tmp_2] "=&r" (tmp_2), [tmp_3] "=&r" (tmp_3),
+ [tmp_4] "=&r" (tmp_4), [tmp_5] "=&r" (tmp_5), [tmp_6] "=&r" (tmp_6)
+ : [loop_size] "r" (inst->anaLen2)
+ : "memory", "hi", "lo"
+ );
+}
+
+#if defined(MIPS_DSP_R1_LE)
+// Denormalize the real-valued signal |in|, the output from inverse FFT.
+void WebRtcNsx_Denormalize_mips(NsxInst_t* inst, int16_t* in, int factor) {
+ int32_t r0, r1, r2, r3, t0;
+ int len = inst->anaLen;
+ int16_t *out = &inst->real[0];
+ int shift = factor - inst->normData;
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "beqz %[len], 8f \n\t"
+ " nop \n\t"
+ "bltz %[shift], 4f \n\t"
+ " sra %[t0], %[len], 2 \n\t"
+ "beqz %[t0], 2f \n\t"
+ " andi %[len], %[len], 3 \n\t"
+ "1: \n\t"
+ "lh %[r0], 0(%[in]) \n\t"
+ "lh %[r1], 2(%[in]) \n\t"
+ "lh %[r2], 4(%[in]) \n\t"
+ "lh %[r3], 6(%[in]) \n\t"
+ "shllv_s.ph %[r0], %[r0], %[shift] \n\t"
+ "shllv_s.ph %[r1], %[r1], %[shift] \n\t"
+ "shllv_s.ph %[r2], %[r2], %[shift] \n\t"
+ "shllv_s.ph %[r3], %[r3], %[shift] \n\t"
+ "addiu %[in], %[in], 8 \n\t"
+ "addiu %[t0], %[t0], -1 \n\t"
+ "sh %[r0], 0(%[out]) \n\t"
+ "sh %[r1], 2(%[out]) \n\t"
+ "sh %[r2], 4(%[out]) \n\t"
+ "sh %[r3], 6(%[out]) \n\t"
+ "bgtz %[t0], 1b \n\t"
+ " addiu %[out], %[out], 8 \n\t"
+ "2: \n\t"
+ "beqz %[len], 8f \n\t"
+ " nop \n\t"
+ "3: \n\t"
+ "lh %[r0], 0(%[in]) \n\t"
+ "addiu %[in], %[in], 2 \n\t"
+ "addiu %[len], %[len], -1 \n\t"
+ "shllv_s.ph %[r0], %[r0], %[shift] \n\t"
+ "addiu %[out], %[out], 2 \n\t"
+ "bgtz %[len], 3b \n\t"
+ " sh %[r0], -2(%[out]) \n\t"
+ "b 8f \n\t"
+ "4: \n\t"
+ "negu %[shift], %[shift] \n\t"
+ "beqz %[t0], 6f \n\t"
+ " andi %[len], %[len], 3 \n\t"
+ "5: \n\t"
+ "lh %[r0], 0(%[in]) \n\t"
+ "lh %[r1], 2(%[in]) \n\t"
+ "lh %[r2], 4(%[in]) \n\t"
+ "lh %[r3], 6(%[in]) \n\t"
+ "srav %[r0], %[r0], %[shift] \n\t"
+ "srav %[r1], %[r1], %[shift] \n\t"
+ "srav %[r2], %[r2], %[shift] \n\t"
+ "srav %[r3], %[r3], %[shift] \n\t"
+ "addiu %[in], %[in], 8 \n\t"
+ "addiu %[t0], %[t0], -1 \n\t"
+ "sh %[r0], 0(%[out]) \n\t"
+ "sh %[r1], 2(%[out]) \n\t"
+ "sh %[r2], 4(%[out]) \n\t"
+ "sh %[r3], 6(%[out]) \n\t"
+ "bgtz %[t0], 5b \n\t"
+ " addiu %[out], %[out], 8 \n\t"
+ "6: \n\t"
+ "beqz %[len], 8f \n\t"
+ " nop \n\t"
+ "7: \n\t"
+ "lh %[r0], 0(%[in]) \n\t"
+ "addiu %[in], %[in], 2 \n\t"
+ "addiu %[len], %[len], -1 \n\t"
+ "srav %[r0], %[r0], %[shift] \n\t"
+ "addiu %[out], %[out], 2 \n\t"
+ "bgtz %[len], 7b \n\t"
+ " sh %[r0], -2(%[out]) \n\t"
+ "8: \n\t"
+ ".set pop \n\t"
+ : [t0] "=&r" (t0), [r0] "=&r" (r0), [r1] "=&r" (r1),
+ [r2] "=&r" (r2), [r3] "=&r" (r3)
+ : [len] "r" (len), [shift] "r" (shift), [in] "r" (in),
+ [out] "r" (out)
+ : "memory"
+ );
+}
+#endif
+
+// Normalize the real-valued signal |in|, the input to forward FFT.
+void WebRtcNsx_NormalizeRealBuffer_mips(NsxInst_t* inst,
+ const int16_t* in,
+ int16_t* out) {
+ int32_t r0, r1, r2, r3, t0;
+ int len = inst->anaLen;
+ int shift = inst->normData;
+
+ __asm __volatile (
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "beqz %[len], 4f \n\t"
+ " sra %[t0], %[len], 2 \n\t"
+ "beqz %[t0], 2f \n\t"
+ " andi %[len], %[len], 3 \n\t"
+ "1: \n\t"
+ "lh %[r0], 0(%[in]) \n\t"
+ "lh %[r1], 2(%[in]) \n\t"
+ "lh %[r2], 4(%[in]) \n\t"
+ "lh %[r3], 6(%[in]) \n\t"
+ "sllv %[r0], %[r0], %[shift] \n\t"
+ "sllv %[r1], %[r1], %[shift] \n\t"
+ "sllv %[r2], %[r2], %[shift] \n\t"
+ "sllv %[r3], %[r3], %[shift] \n\t"
+ "addiu %[in], %[in], 8 \n\t"
+ "addiu %[t0], %[t0], -1 \n\t"
+ "sh %[r0], 0(%[out]) \n\t"
+ "sh %[r1], 2(%[out]) \n\t"
+ "sh %[r2], 4(%[out]) \n\t"
+ "sh %[r3], 6(%[out]) \n\t"
+ "bgtz %[t0], 1b \n\t"
+ " addiu %[out], %[out], 8 \n\t"
+ "2: \n\t"
+ "beqz %[len], 4f \n\t"
+ " nop \n\t"
+ "3: \n\t"
+ "lh %[r0], 0(%[in]) \n\t"
+ "addiu %[in], %[in], 2 \n\t"
+ "addiu %[len], %[len], -1 \n\t"
+ "sllv %[r0], %[r0], %[shift] \n\t"
+ "addiu %[out], %[out], 2 \n\t"
+ "bgtz %[len], 3b \n\t"
+ " sh %[r0], -2(%[out]) \n\t"
+ "4: \n\t"
+ ".set pop \n\t"
+ : [t0] "=&r" (t0), [r0] "=&r" (r0), [r1] "=&r" (r1),
+ [r2] "=&r" (r2), [r3] "=&r" (r3)
+ : [len] "r" (len), [shift] "r" (shift), [in] "r" (in),
+ [out] "r" (out)
+ : "memory"
+ );
+}
+
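Because the assembly is hard to review, here is a scalar C sketch of what the two hot loops above compute (not part of the patch; behavior read off the non-DSP path). AnalysisUpdate applies the Q14 window with round-to-nearest (the addiu 0x2000 / sra 14 pair, or shra_r.w in the DSP path), and SynthesisUpdate additionally applies the Q13 gain_factor and does saturating adds into the synthesis buffer:

    #include <stdint.h>

    // out[i] = round(window[i] * in[i] / 2^14), the analysis windowing step.
    static void WindowQ14Ref(const int16_t* window, const int16_t* in,
                             int16_t* out, int len) {
      int i;
      for (i = 0; i < len; ++i) {
        out[i] = (int16_t)(((int32_t)window[i] * in[i] + (1 << 13)) >> 14);
      }
    }

    // One synthesis sample: window, apply the Q13 gain with a positive clamp,
    // then accumulate into the synthesis buffer with full saturation.
    static int16_t SynthSampleRef(int16_t w, int16_t re, int16_t gain_q13,
                                  int16_t synth) {
      int32_t t = ((int32_t)w * re + (1 << 13)) >> 14;  // windowed, rounded
      t = (t * gain_q13 + (1 << 12)) >> 13;             // gain, Q13 -> Q0
      if (t > 32767) t = 32767;                         // positive clamp only
      int32_t s = t + synth;
      if (s > 32767) s = 32767;                         // saturate the sum
      if (s < -32768) s = -32768;
      return (int16_t)s;
    }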
diff --git a/chromium/third_party/webrtc/modules/audio_processing/processing_component.cc b/chromium/third_party/webrtc/modules/audio_processing/processing_component.cc
index 23bf22570b1..9e16d7c4eea 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/processing_component.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/processing_component.cc
@@ -12,15 +12,12 @@
#include <assert.h>
-#include "webrtc/modules/audio_processing/audio_processing_impl.h"
+#include "webrtc/modules/audio_processing/include/audio_processing.h"
namespace webrtc {
-ProcessingComponent::ProcessingComponent() {}
-
-ProcessingComponent::ProcessingComponent(const AudioProcessingImpl* apm)
- : apm_(apm),
- initialized_(false),
+ProcessingComponent::ProcessingComponent()
+ : initialized_(false),
enabled_(false),
num_handles_(0) {}
@@ -35,7 +32,7 @@ int ProcessingComponent::Destroy() {
}
initialized_ = false;
- return apm_->kNoError;
+ return AudioProcessing::kNoError;
}
int ProcessingComponent::EnableComponent(bool enable) {
@@ -43,7 +40,7 @@ int ProcessingComponent::EnableComponent(bool enable) {
enabled_ = enable; // Must be set before Initialize() is called.
int err = Initialize();
- if (err != apm_->kNoError) {
+ if (err != AudioProcessing::kNoError) {
enabled_ = false;
return err;
}
@@ -51,7 +48,7 @@ int ProcessingComponent::EnableComponent(bool enable) {
enabled_ = enable;
}
- return apm_->kNoError;
+ return AudioProcessing::kNoError;
}
bool ProcessingComponent::is_component_enabled() const {
@@ -69,7 +66,7 @@ int ProcessingComponent::num_handles() const {
int ProcessingComponent::Initialize() {
if (!enabled_) {
- return apm_->kNoError;
+ return AudioProcessing::kNoError;
}
num_handles_ = num_handles_required();
@@ -82,12 +79,12 @@ int ProcessingComponent::Initialize() {
if (handles_[i] == NULL) {
handles_[i] = CreateHandle();
if (handles_[i] == NULL) {
- return apm_->kCreationFailedError;
+ return AudioProcessing::kCreationFailedError;
}
}
int err = InitializeHandle(handles_[i]);
- if (err != apm_->kNoError) {
+ if (err != AudioProcessing::kNoError) {
return GetHandleError(handles_[i]);
}
}
@@ -98,17 +95,17 @@ int ProcessingComponent::Initialize() {
int ProcessingComponent::Configure() {
if (!initialized_) {
- return apm_->kNoError;
+ return AudioProcessing::kNoError;
}
assert(static_cast<int>(handles_.size()) >= num_handles_);
for (int i = 0; i < num_handles_; i++) {
int err = ConfigureHandle(handles_[i]);
- if (err != apm_->kNoError) {
+ if (err != AudioProcessing::kNoError) {
return GetHandleError(handles_[i]);
}
}
- return apm_->kNoError;
+ return AudioProcessing::kNoError;
}
} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_processing/processing_component.h b/chromium/third_party/webrtc/modules/audio_processing/processing_component.h
index c090d222456..8ee3ac6c7db 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/processing_component.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/processing_component.h
@@ -13,16 +13,13 @@
#include <vector>
-#include "webrtc/modules/audio_processing/include/audio_processing.h"
+#include "webrtc/common.h"
namespace webrtc {
-class AudioProcessingImpl;
-
class ProcessingComponent {
public:
ProcessingComponent();
- explicit ProcessingComponent(const AudioProcessingImpl* apm);
virtual ~ProcessingComponent();
virtual int Initialize();
@@ -41,11 +38,10 @@ class ProcessingComponent {
virtual void* CreateHandle() const = 0;
virtual int InitializeHandle(void* handle) const = 0;
virtual int ConfigureHandle(void* handle) const = 0;
- virtual int DestroyHandle(void* handle) const = 0;
+ virtual void DestroyHandle(void* handle) const = 0;
virtual int num_handles_required() const = 0;
virtual int GetHandleError(void* handle) const = 0;
- const AudioProcessingImpl* apm_;
std::vector<void*> handles_;
bool initialized_;
bool enabled_;
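With apm_ removed, error codes come from the static AudioProcessing constants and subclasses no longer depend on AudioProcessingImpl. A minimal hypothetical subclass against the updated interface (MyComponent/MyHandle are invented for illustration; kUnspecifiedError is assumed to exist in the AudioProcessing error enum alongside the kNoError/kCreationFailedError seen in this patch):

    #include "webrtc/modules/audio_processing/include/audio_processing.h"
    #include "webrtc/modules/audio_processing/processing_component.h"

    struct MyHandle {};  // hypothetical per-handle state

    class MyComponent : public webrtc::ProcessingComponent {
     protected:
      virtual void* CreateHandle() const { return new MyHandle(); }
      virtual int InitializeHandle(void* handle) const {
        return webrtc::AudioProcessing::kNoError;
      }
      virtual int ConfigureHandle(void* handle) const {
        return webrtc::AudioProcessing::kNoError;
      }
      // Signature changed by this patch: DestroyHandle() now returns void.
      virtual void DestroyHandle(void* handle) const {
        delete static_cast<MyHandle*>(handle);
      }
      virtual int num_handles_required() const { return 1; }
      virtual int GetHandleError(void* handle) const {
        return webrtc::AudioProcessing::kUnspecifiedError;
      }
    };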
diff --git a/chromium/third_party/webrtc/modules/audio_processing/rms_level.cc b/chromium/third_party/webrtc/modules/audio_processing/rms_level.cc
new file mode 100644
index 00000000000..14136bf3049
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_processing/rms_level.cc
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_processing/rms_level.h"
+
+#include <assert.h>
+#include <math.h>
+
+namespace webrtc {
+
+static const float kMaxSquaredLevel = 32768 * 32768;
+
+RMSLevel::RMSLevel()
+ : sum_square_(0),
+ sample_count_(0) {}
+
+RMSLevel::~RMSLevel() {}
+
+void RMSLevel::Reset() {
+ sum_square_ = 0;
+ sample_count_ = 0;
+}
+
+void RMSLevel::Process(const int16_t* data, int length) {
+ for (int i = 0; i < length; ++i) {
+ sum_square_ += data[i] * data[i];
+ }
+ sample_count_ += length;
+}
+
+void RMSLevel::ProcessMuted(int length) {
+ sample_count_ += length;
+}
+
+int RMSLevel::RMS() {
+ if (sample_count_ == 0 || sum_square_ == 0) {
+ Reset();
+ return kMinLevel;
+ }
+
+ // Normalize by the max level.
+ float rms = sum_square_ / (sample_count_ * kMaxSquaredLevel);
+ // 20log_10(x^0.5) = 10log_10(x)
+ rms = 10 * log10(rms);
+ assert(rms <= 0);
+ if (rms < -kMinLevel)
+ rms = -kMinLevel;
+
+ rms = -rms;
+ Reset();
+ return static_cast<int>(rms + 0.5);
+}
+
+} // namespace webrtc
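A quick numeric check of the conversion above (a sketch, not part of the patch): a full-scale alternating signal should read roughly 0 dBFS, half amplitude about 6 dB below that, and silence maps to kMinLevel (127).

    #include "webrtc/modules/audio_processing/rms_level.h"

    void RmsLevelSanityCheck() {
      webrtc::RMSLevel level;
      int16_t full[160], half[160];  // one 10 ms chunk at 16 kHz
      for (int i = 0; i < 160; ++i) {
        full[i] = (i & 1) ? -32767 : 32767;
        half[i] = full[i] / 2;
      }
      level.Process(full, 160);
      int at_full_scale = level.RMS();  // 10*log10(32767^2/32768^2) -> 0
      level.Process(half, 160);
      int at_half_scale = level.RMS();  // ~ -6.02 dBFS -> 6
      level.ProcessMuted(160);
      int muted = level.RMS();          // zero sum_square_ -> kMinLevel (127)
      (void)at_full_scale; (void)at_half_scale; (void)muted;
    }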
diff --git a/chromium/third_party/webrtc/modules/audio_processing/rms_level.h b/chromium/third_party/webrtc/modules/audio_processing/rms_level.h
new file mode 100644
index 00000000000..055d271bb19
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_processing/rms_level.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_RMS_LEVEL_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_RMS_LEVEL_H_
+
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+// Computes the root mean square (RMS) level in dBFS (decibels relative to
+// digital full-scale) of audio data. The computation follows RFC 6465:
+// https://tools.ietf.org/html/rfc6465
+// with the intent that it can provide the RTP audio level indication.
+//
+// The expected approach is to provide constant-sized chunks of audio to
+// Process(). When enough chunks have been accumulated to form a packet, call
+// RMS() to get the audio level indicator for the RTP header.
+class RMSLevel {
+ public:
+ static const int kMinLevel = 127;
+
+ RMSLevel();
+ ~RMSLevel();
+
+ // Can be called to reset internal states, but is not required during normal
+ // operation.
+ void Reset();
+
+ // Pass each chunk of audio to Process() to accumulate the level.
+ void Process(const int16_t* data, int length);
+
+  // If all samples in a chunk of the given |length| have zero magnitude, this
+  // is a shortcut to avoid some computation.
+ void ProcessMuted(int length);
+
+ // Computes the RMS level over all data passed to Process() since the last
+ // call to RMS(). The returned value is positive but should be interpreted as
+ // negative as per the RFC. It is constrained to [0, 127].
+ int RMS();
+
+ private:
+ float sum_square_;
+ int sample_count_;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_PROCESSING_RMS_LEVEL_H_
+
diff --git a/chromium/third_party/webrtc/modules/audio_processing/splitting_filter.cc b/chromium/third_party/webrtc/modules/audio_processing/splitting_filter.cc
deleted file mode 100644
index 372c8dc426e..00000000000
--- a/chromium/third_party/webrtc/modules/audio_processing/splitting_filter.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
-#include "webrtc/modules/audio_processing/splitting_filter.h"
-
-namespace webrtc {
-
-void SplittingFilterAnalysis(const int16_t* in_data,
- int16_t* low_band,
- int16_t* high_band,
- int32_t* filter_state1,
- int32_t* filter_state2)
-{
- WebRtcSpl_AnalysisQMF(in_data, low_band, high_band, filter_state1, filter_state2);
-}
-
-void SplittingFilterSynthesis(const int16_t* low_band,
- const int16_t* high_band,
- int16_t* out_data,
- int32_t* filt_state1,
- int32_t* filt_state2)
-{
- WebRtcSpl_SynthesisQMF(low_band, high_band, out_data, filt_state1, filt_state2);
-}
-} // namespace webrtc
diff --git a/chromium/third_party/webrtc/modules/audio_processing/splitting_filter.h b/chromium/third_party/webrtc/modules/audio_processing/splitting_filter.h
deleted file mode 100644
index b6c851273a6..00000000000
--- a/chromium/third_party/webrtc/modules/audio_processing/splitting_filter.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_SPLITTING_FILTER_H_
-#define WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_SPLITTING_FILTER_H_
-
-#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
-#include "webrtc/typedefs.h"
-
-namespace webrtc {
-/*
- * SplittingFilterbank_analysisQMF(...)
- *
- * Splits a super-wb signal into two subbands: 0-8 kHz and 8-16 kHz.
- *
- * Input:
- * - in_data : super-wb audio signal
- *
- * Input & Output:
- * - filt_state1: Filter state for first all-pass filter
- * - filt_state2: Filter state for second all-pass filter
- *
- * Output:
- * - low_band : The signal from the 0-4 kHz band
- * - high_band : The signal from the 4-8 kHz band
- */
-void SplittingFilterAnalysis(const int16_t* in_data,
- int16_t* low_band,
- int16_t* high_band,
- int32_t* filt_state1,
- int32_t* filt_state2);
-
-/*
- * SplittingFilterbank_synthesisQMF(...)
- *
- * Combines the two subbands (0-8 and 8-16 kHz) into a super-wb signal.
- *
- * Input:
- * - low_band : The signal with the 0-8 kHz band
- * - high_band : The signal with the 8-16 kHz band
- *
- * Input & Output:
- * - filt_state1: Filter state for first all-pass filter
- * - filt_state2: Filter state for second all-pass filter
- *
- * Output:
- * - out_data : super-wb speech signal
- */
-void SplittingFilterSynthesis(const int16_t* low_band,
- const int16_t* high_band,
- int16_t* out_data,
- int32_t* filt_state1,
- int32_t* filt_state2);
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_SPLITTING_FILTER_H_
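Since both deleted wrappers were one-line pass-throughs, former call sites can presumably call the SPL routines directly. A sketch under that assumption (argument order copied from the deleted bodies; the SPL signatures may have changed since):

    #include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"

    // Split, (optionally) process per band, and merge again.
    static void SplitProcessMerge(const int16_t* in_data, int16_t* out_data,
                                  int16_t* low_band, int16_t* high_band,
                                  int32_t* ana_state1, int32_t* ana_state2,
                                  int32_t* syn_state1, int32_t* syn_state2) {
      WebRtcSpl_AnalysisQMF(in_data, low_band, high_band,
                            ana_state1, ana_state2);
      /* ... per-band processing would go here ... */
      WebRtcSpl_SynthesisQMF(low_band, high_band, out_data,
                             syn_state1, syn_state2);
    }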
diff --git a/chromium/third_party/webrtc/modules/audio_processing/typing_detection.cc b/chromium/third_party/webrtc/modules/audio_processing/typing_detection.cc
new file mode 100644
index 00000000000..5f5ce0abafd
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_processing/typing_detection.cc
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_processing/typing_detection.h"
+
+namespace webrtc {
+
+TypingDetection::TypingDetection()
+ : time_active_(0),
+ time_since_last_typing_(0),
+ penalty_counter_(0),
+ counter_since_last_detection_update_(0),
+ detection_to_report_(false),
+ new_detection_to_report_(false),
+ time_window_(10),
+ cost_per_typing_(100),
+ reporting_threshold_(300),
+ penalty_decay_(1),
+ type_event_delay_(2),
+ report_detection_update_period_(1) {
+}
+
+TypingDetection::~TypingDetection() {}
+
+bool TypingDetection::Process(bool key_pressed, bool vad_activity) {
+ if (vad_activity)
+ time_active_++;
+ else
+ time_active_ = 0;
+
+  // Keep track of the time since the last typing event.
+ if (key_pressed)
+ time_since_last_typing_ = 0;
+ else
+ ++time_since_last_typing_;
+
+ if (time_since_last_typing_ < type_event_delay_ &&
+ vad_activity &&
+ time_active_ < time_window_) {
+ penalty_counter_ += cost_per_typing_;
+ if (penalty_counter_ > reporting_threshold_)
+ new_detection_to_report_ = true;
+ }
+
+ if (penalty_counter_ > 0)
+ penalty_counter_ -= penalty_decay_;
+
+ if (++counter_since_last_detection_update_ ==
+ report_detection_update_period_) {
+ detection_to_report_ = new_detection_to_report_;
+ new_detection_to_report_ = false;
+ counter_since_last_detection_update_ = 0;
+ }
+
+ return detection_to_report_;
+}
+
+int TypingDetection::TimeSinceLastDetectionInSeconds() {
+ // Round to whole seconds.
+ return (time_since_last_typing_ + 50) / 100;
+}
+
+void TypingDetection::SetParameters(int time_window,
+ int cost_per_typing,
+ int reporting_threshold,
+ int penalty_decay,
+ int type_event_delay,
+ int report_detection_update_period) {
+ if (time_window) time_window_ = time_window;
+
+ if (cost_per_typing) cost_per_typing_ = cost_per_typing;
+
+ if (reporting_threshold) reporting_threshold_ = reporting_threshold;
+
+ if (penalty_decay) penalty_decay_ = penalty_decay;
+
+ if (type_event_delay) type_event_delay_ = type_event_delay;
+
+ if (report_detection_update_period)
+ report_detection_update_period_ = report_detection_update_period;
+}
+
+} // namespace webrtc
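A usage sketch (not part of the patch): the detector is driven once per 10 ms audio frame, pairing the host's key-press state with the VAD decision for that frame; KeyPressedSinceLastFrame() and VadActivity() below are hypothetical stand-ins for those inputs.

    #include "webrtc/modules/audio_processing/typing_detection.h"

    bool KeyPressedSinceLastFrame();  // hypothetical host input
    bool VadActivity();               // hypothetical VAD result for this frame

    // Call once per 10 ms audio frame; returns true while typing noise is
    // being reported.
    bool ProcessFrame(webrtc::TypingDetection* td) {
      return td->Process(KeyPressedSinceLastFrame(), VadActivity());
    }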
diff --git a/chromium/third_party/webrtc/modules/audio_processing/typing_detection.h b/chromium/third_party/webrtc/modules/audio_processing/typing_detection.h
new file mode 100644
index 00000000000..5fa6456e9e9
--- /dev/null
+++ b/chromium/third_party/webrtc/modules/audio_processing/typing_detection.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_TYPING_DETECTION_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_TYPING_DETECTION_H_
+
+#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/typedefs.h"
+
+namespace webrtc {
+
+class TypingDetection {
+ public:
+ TypingDetection();
+ virtual ~TypingDetection();
+
+  // Run the detection algorithm. Shall be called every 10 ms. Returns true if
+ // typing is detected, or false if not, based on the update period as set with
+ // SetParameters(). See |report_detection_update_period_| description below.
+ bool Process(bool key_pressed, bool vad_activity);
+
+ // Gets the time in seconds since the last detection.
+ int TimeSinceLastDetectionInSeconds();
+
+ // Sets the algorithm parameters. A parameter value of 0 leaves it unchanged.
+  // See the corresponding member variables below for descriptions.
+ void SetParameters(int time_window,
+ int cost_per_typing,
+ int reporting_threshold,
+ int penalty_decay,
+ int type_event_delay,
+ int report_detection_update_period);
+
+ private:
+ int time_active_;
+ int time_since_last_typing_;
+ int penalty_counter_;
+
+ // Counter since last time the detection status reported by Process() was
+ // updated. See also |report_detection_update_period_|.
+ int counter_since_last_detection_update_;
+
+ // The detection status to report. Updated every
+ // |report_detection_update_period_| call to Process().
+ bool detection_to_report_;
+
+ // What |detection_to_report_| should be set to next time it is updated.
+ bool new_detection_to_report_;
+
+ // Settable threshold values.
+
+ // Number of 10 ms slots accepted to count as a hit.
+ int time_window_;
+
+  // Penalty added when a key press coincides with vad activity.
+ int cost_per_typing_;
+
+ // Threshold for |penalty_counter_|.
+ int reporting_threshold_;
+
+ // How much we reduce |penalty_counter_| every 10 ms.
+ int penalty_decay_;
+
+  // How old a typing event may be and still be counted.
+ int type_event_delay_;
+
+ // Settable update period.
+
+ // Number of 10 ms slots between each update of the detection status returned
+  // by Process(). This adds inertia to the algorithm, which is usually
+  // desirable, and saves consumers of the class from implementing it
+  // themselves if they don't wish to.
+  // If set to 1, each call to Process() returns the detection status for that
+  // 10 ms slot.
+  // If set to N (where N > 1), the detection status returned from Process()
+  // remains the same until Process() has been called N times. Then, if none of
+  // the last N calls to Process() detected typing in its 10 ms slot, Process()
+  // returns false; if at least one of the last N calls detected typing,
+  // Process() returns true. That returned status then remains the same until
+  // the next N calls have been made.
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_PROCESSING_TYPING_DETECTION_H_
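As a usage illustration of the API above, the following hypothetical caller (not part of this change; the parameter values 300 and 1 are made up) drives the detector from a 10 ms capture callback and relies on the zero-means-unchanged behavior of SetParameters():

#include "webrtc/modules/audio_processing/typing_detection.h"

class TypingMonitor {
 public:
  TypingMonitor() {
    // Zeros leave the corresponding parameters at their defaults; only the
    // reporting threshold and the update period are overridden here
    // (illustrative values).
    detector_.SetParameters(0, 0, 300, 0, 0, 1);
  }

  // Called once per 10 ms capture frame. |key_pressed| comes from the
  // platform, |vad_activity| from a VAD.
  void OnFrame(bool key_pressed, bool vad_activity) {
    if (detector_.Process(key_pressed, vad_activity)) {
      // Typing coincided with voice activity; e.g. attenuate the stream.
    }
  }

 private:
  webrtc::TypingDetection detector_;
};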
diff --git a/chromium/third_party/webrtc/modules/audio_processing/utility/delay_estimator.c b/chromium/third_party/webrtc/modules/audio_processing/utility/delay_estimator.c
index 6d6e9bc97bb..3b2043267ff 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/utility/delay_estimator.c
+++ b/chromium/third_party/webrtc/modules/audio_processing/utility/delay_estimator.c
@@ -30,10 +30,6 @@ static const float kMinHistogramThreshold = 1.5f;
static const int kMinRequiredHits = 10;
static const int kMaxHitsWhenPossiblyNonCausal = 10;
static const int kMaxHitsWhenPossiblyCausal = 1000;
-// TODO(bjornv): Make kMaxDelayDifference a configurable parameter, since it
-// corresponds to the filter length if the delay estimation is used in echo
-// control.
-static const int kMaxDelayDifference = 32;
static const float kQ14Scaling = 1.f / (1 << 14); // Scaling by 2^14 to get Q0.
static const float kFractionSlope = 0.05f;
static const float kMinFractionWhenPossiblyCausal = 0.5f;
@@ -195,8 +191,8 @@ static int HistogramBasedValidation(const BinaryDelayEstimator* self,
// depending on the distance between the |candidate_delay| and |last_delay|.
// TODO(bjornv): How much can we gain by turning the fraction calculation
// into tables?
- if (delay_difference >= kMaxDelayDifference) {
- fraction = 1.f - kFractionSlope * (delay_difference - kMaxDelayDifference);
+ if (delay_difference > self->allowed_offset) {
+ fraction = 1.f - kFractionSlope * (delay_difference - self->allowed_offset);
fraction = (fraction > kMinFractionWhenPossiblyCausal ? fraction :
kMinFractionWhenPossiblyCausal);
} else if (delay_difference < 0) {
@@ -308,6 +304,39 @@ void WebRtc_InitBinaryDelayEstimatorFarend(BinaryDelayEstimatorFarend* self) {
memset(self->far_bit_counts, 0, sizeof(int) * self->history_size);
}
+void WebRtc_SoftResetBinaryDelayEstimatorFarend(
+ BinaryDelayEstimatorFarend* self, int delay_shift) {
+ int abs_shift = abs(delay_shift);
+ int shift_size = 0;
+ int dest_index = 0;
+ int src_index = 0;
+ int padding_index = 0;
+
+ assert(self != NULL);
+ shift_size = self->history_size - abs_shift;
+ assert(shift_size > 0);
+ if (delay_shift == 0) {
+ return;
+ } else if (delay_shift > 0) {
+ dest_index = abs_shift;
+ } else if (delay_shift < 0) {
+ src_index = abs_shift;
+ padding_index = shift_size;
+ }
+
+ // Shift and zero pad buffers.
+ memmove(&self->binary_far_history[dest_index],
+ &self->binary_far_history[src_index],
+ sizeof(*self->binary_far_history) * shift_size);
+ memset(&self->binary_far_history[padding_index], 0,
+ sizeof(*self->binary_far_history) * abs_shift);
+ memmove(&self->far_bit_counts[dest_index],
+ &self->far_bit_counts[src_index],
+ sizeof(*self->far_bit_counts) * shift_size);
+ memset(&self->far_bit_counts[padding_index], 0,
+ sizeof(*self->far_bit_counts) * abs_shift);
+}
+
void WebRtc_AddBinaryFarSpectrum(BinaryDelayEstimatorFarend* handle,
uint32_t binary_far_spectrum) {
assert(handle != NULL);
@@ -349,10 +378,10 @@ void WebRtc_FreeBinaryDelayEstimator(BinaryDelayEstimator* self) {
}
BinaryDelayEstimator* WebRtc_CreateBinaryDelayEstimator(
- BinaryDelayEstimatorFarend* farend, int lookahead) {
+ BinaryDelayEstimatorFarend* farend, int max_lookahead) {
BinaryDelayEstimator* self = NULL;
- if ((farend != NULL) && (lookahead >= 0)) {
+ if ((farend != NULL) && (max_lookahead >= 0)) {
// Sanity conditions fulfilled.
self = malloc(sizeof(BinaryDelayEstimator));
}
@@ -361,7 +390,11 @@ BinaryDelayEstimator* WebRtc_CreateBinaryDelayEstimator(
int malloc_fail = 0;
self->farend = farend;
- self->near_history_size = lookahead + 1;
+ self->near_history_size = max_lookahead + 1;
+ self->robust_validation_enabled = 0; // Disabled by default.
+ self->allowed_offset = 0;
+
+ self->lookahead = max_lookahead;
// Allocate memory for spectrum buffers. The extra array element in
// |mean_bit_counts| and |histogram| is a dummy element only used while
@@ -374,7 +407,7 @@ BinaryDelayEstimator* WebRtc_CreateBinaryDelayEstimator(
malloc_fail |= (self->bit_counts == NULL);
// Allocate memory for history buffers.
- self->binary_near_history = malloc((lookahead + 1) * sizeof(uint32_t));
+ self->binary_near_history = malloc((max_lookahead + 1) * sizeof(uint32_t));
malloc_fail |= (self->binary_near_history == NULL);
self->histogram = malloc((farend->history_size + 1) * sizeof(float));
@@ -400,26 +433,40 @@ void WebRtc_InitBinaryDelayEstimator(BinaryDelayEstimator* self) {
self->mean_bit_counts[i] = (20 << 9); // 20 in Q9.
self->histogram[i] = 0.f;
}
- self->minimum_probability = (32 << 9); // 32 in Q9.
- self->last_delay_probability = (32 << 9); // 32 in Q9.
+ self->minimum_probability = kMaxBitCountsQ9; // 32 in Q9.
+ self->last_delay_probability = (int) kMaxBitCountsQ9; // 32 in Q9.
// Default return value if we're unable to estimate. -1 is used for errors.
self->last_delay = -2;
- self->robust_validation_enabled = 0; // Disabled by default.
self->last_candidate_delay = -2;
self->compare_delay = self->farend->history_size;
self->candidate_hits = 0;
self->last_delay_histogram = 0.f;
}
+int WebRtc_SoftResetBinaryDelayEstimator(BinaryDelayEstimator* self,
+ int delay_shift) {
+ int lookahead = 0;
+ assert(self != NULL);
+ lookahead = self->lookahead;
+ self->lookahead -= delay_shift;
+ if (self->lookahead < 0) {
+ self->lookahead = 0;
+ }
+ if (self->lookahead > self->near_history_size - 1) {
+ self->lookahead = self->near_history_size - 1;
+ }
+ return lookahead - self->lookahead;
+}
+
int WebRtc_ProcessBinarySpectrum(BinaryDelayEstimator* self,
uint32_t binary_near_spectrum) {
int i = 0;
int candidate_delay = -1;
int valid_candidate = 0;
- int32_t value_best_candidate = 32 << 9; // 32 in Q9, (max |mean_bit_counts|).
+ int32_t value_best_candidate = kMaxBitCountsQ9;
int32_t value_worst_candidate = 0;
int32_t valley_depth = 0;
@@ -430,8 +477,7 @@ int WebRtc_ProcessBinarySpectrum(BinaryDelayEstimator* self,
memmove(&(self->binary_near_history[1]), &(self->binary_near_history[0]),
(self->near_history_size - 1) * sizeof(uint32_t));
self->binary_near_history[0] = binary_near_spectrum;
- binary_near_spectrum =
- self->binary_near_history[self->near_history_size - 1];
+ binary_near_spectrum = self->binary_near_history[self->lookahead];
}
// Compare with delayed spectra and store the |bit_counts| for each delay.
@@ -547,21 +593,23 @@ int WebRtc_binary_last_delay(BinaryDelayEstimator* self) {
return self->last_delay;
}
-int WebRtc_binary_last_delay_quality(BinaryDelayEstimator* self) {
- int delay_quality = 0;
+float WebRtc_binary_last_delay_quality(BinaryDelayEstimator* self) {
+ float quality = 0;
assert(self != NULL);
- // |last_delay_probability| is the opposite of quality and states how deep the
- // minimum of the cost function is. The value states how many non-matching
- // bits we have between the binary spectra for the corresponding delay
- // estimate. The range is thus from 0 to 32, since we use 32 bits in the
- // binary spectra.
-
- // Return the |delay_quality| = 1 - |last_delay_probability| / 32 (in Q14).
- delay_quality = (32 << 9) - self->last_delay_probability;
- if (delay_quality < 0) {
- delay_quality = 0;
+
+ if (self->robust_validation_enabled) {
+ // Simply a linear function of the histogram height at the delay estimate.
+ quality = self->histogram[self->compare_delay] / kHistogramMax;
+ } else {
+ // Note that |last_delay_probability| states how deep the minimum of the
+ // cost function is, so it is an error measure rather than a quality.
+ quality = (float) (kMaxBitCountsQ9 - self->last_delay_probability) /
+ kMaxBitCountsQ9;
+ if (quality < 0) {
+ quality = 0;
+ }
}
- return delay_quality;
+ return quality;
}
void WebRtc_MeanEstimatorFix(int32_t new_value,
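To make the shift-and-pad logic of WebRtc_SoftResetBinaryDelayEstimatorFarend() above concrete, here is a standalone toy version (an illustration, not part of the change) that applies the same memmove/memset pattern to a plain int array:

#include <stdlib.h>
#include <string.h>

// With history_size = 5, delay_shift = +2 turns {a, b, c, d, e} into
// {0, 0, a, b, c}, and delay_shift = -2 turns it into {c, d, e, 0, 0}.
void SoftShift(int* history, int history_size, int delay_shift) {
  int abs_shift = abs(delay_shift);
  int shift_size = history_size - abs_shift;
  int dest_index = 0;
  int src_index = 0;
  int padding_index = 0;
  if (delay_shift == 0) return;
  if (delay_shift > 0) {
    dest_index = abs_shift;       // Entries move towards higher indices...
  } else {
    src_index = abs_shift;        // ...or towards lower indices,
    padding_index = shift_size;   // and the vacated slots are zeroed.
  }
  memmove(&history[dest_index], &history[src_index],
          sizeof(*history) * shift_size);
  memset(&history[padding_index], 0, sizeof(*history) * abs_shift);
}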
diff --git a/chromium/third_party/webrtc/modules/audio_processing/utility/delay_estimator.h b/chromium/third_party/webrtc/modules/audio_processing/utility/delay_estimator.h
index 7ffb81b8b18..3d5ffce20e9 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/utility/delay_estimator.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/utility/delay_estimator.h
@@ -16,6 +16,8 @@
#include "webrtc/typedefs.h"
+static const int32_t kMaxBitCountsQ9 = (32 << 9); // 32 matching bits in Q9.
+
typedef struct {
// Pointer to bit counts.
int* far_bit_counts;
@@ -44,12 +46,16 @@ typedef struct {
// Robust validation
int robust_validation_enabled;
+ int allowed_offset;
int last_candidate_delay;
int compare_delay;
int candidate_hits;
float* histogram;
float last_delay_histogram;
+ // For dynamically changing the lookahead when using SoftReset...().
+ int lookahead;
+
// Far-end binary spectrum history buffer etc.
BinaryDelayEstimatorFarend* farend;
} BinaryDelayEstimator;
@@ -90,6 +96,15 @@ BinaryDelayEstimatorFarend* WebRtc_CreateBinaryDelayEstimatorFarend(
//
void WebRtc_InitBinaryDelayEstimatorFarend(BinaryDelayEstimatorFarend* self);
+// Soft resets the delay estimation far-end instance created with
+// WebRtc_CreateBinaryDelayEstimatorFarend(...).
+//
+// Input:
+// - delay_shift : The number of blocks to shift the history buffers.
+//
+void WebRtc_SoftResetBinaryDelayEstimatorFarend(
+ BinaryDelayEstimatorFarend* self, int delay_shift);
+
// Adds the binary far-end spectrum to the internal far-end history buffer. This
// spectrum is used as reference when calculating the delay using
// WebRtc_ProcessBinarySpectrum().
@@ -121,38 +136,10 @@ void WebRtc_FreeBinaryDelayEstimator(BinaryDelayEstimator* self);
// Allocates the memory needed by the binary delay estimation. The memory needs
// to be initialized separately through WebRtc_InitBinaryDelayEstimator(...).
//
-// Inputs:
-// - farend : Pointer to the far-end part of the Binary Delay
-// Estimator. This memory has to be created separately
-// prior to this call using
-// WebRtc_CreateBinaryDelayEstimatorFarend().
-//
-// Note that BinaryDelayEstimator does not take
-// ownership of |farend|.
-//
-// - lookahead : Amount of non-causal lookahead to use. This can
-// detect cases in which a near-end signal occurs before
-// the corresponding far-end signal. It will delay the
-// estimate for the current block by an equal amount,
-// and the returned values will be offset by it.
-//
-// A value of zero is the typical no-lookahead case.
-// This also represents the minimum delay which can be
-// estimated.
-//
-// Note that the effective range of delay estimates is
-// [-|lookahead|,... ,|history_size|-|lookahead|)
-// where |history_size| was set upon creating the far-end
-// history buffer size.
-//
-// Return value:
-// - BinaryDelayEstimator*
-// : Created |handle|. If the memory can't be allocated
-// or if any of the input parameters are invalid NULL
-// is returned.
-//
+// See WebRtc_CreateDelayEstimator(..) in delay_estimator_wrapper.c for detailed
+// description.
BinaryDelayEstimator* WebRtc_CreateBinaryDelayEstimator(
- BinaryDelayEstimatorFarend* farend, int lookahead);
+ BinaryDelayEstimatorFarend* farend, int max_lookahead);
// Initializes the delay estimation instance created with
// WebRtc_CreateBinaryDelayEstimator(...).
@@ -165,6 +152,18 @@ BinaryDelayEstimator* WebRtc_CreateBinaryDelayEstimator(
//
void WebRtc_InitBinaryDelayEstimator(BinaryDelayEstimator* self);
+// Soft resets the delay estimation instance created with
+// WebRtc_CreateBinaryDelayEstimator(...).
+//
+// Input:
+// - delay_shift : The number of blocks to shift the history buffers.
+//
+// Return value:
+// - actual_shifts : The actual number of shifts performed.
+//
+int WebRtc_SoftResetBinaryDelayEstimator(BinaryDelayEstimator* self,
+ int delay_shift);
+
// Estimates and returns the delay between the binary far-end and binary near-
// end spectra. It is assumed the binary far-end spectrum has been added using
// WebRtc_AddBinaryFarSpectrum() prior to this call. The value will be offset by
@@ -199,17 +198,12 @@ int WebRtc_binary_last_delay(BinaryDelayEstimator* self);
// Returns the estimation quality of the last calculated delay updated by the
// function WebRtc_ProcessBinarySpectrum(...). The estimation quality is a value
-// in the interval [0, 1] in Q14. The higher the value, the better quality.
-//
-// Input:
-// - self : Pointer to the delay estimation instance.
+// in the interval [0, 1]. The higher the value, the better the quality.
//
// Return value:
-// - delay_quality : >= 0 - Estimation quality (in Q14) of last
-// calculated delay value.
-// -2 - Insufficient data for estimation.
-//
-int WebRtc_binary_last_delay_quality(BinaryDelayEstimator* self);
+// - delay_quality : >= 0 - Estimation quality of last calculated
+// delay value.
+float WebRtc_binary_last_delay_quality(BinaryDelayEstimator* self);
// Updates the |mean_value| recursively with a step size of 2^-|factor|. This
// function is used internally in the Binary Delay Estimator as well as the
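As a numeric check of the new float quality scale (the non-robust branch in delay_estimator.c above; the probability value below is illustrative, chosen to match the unittest):

#include <stdint.h>

// quality = (kMaxBitCountsQ9 - last_delay_probability) / kMaxBitCountsQ9,
// where kMaxBitCountsQ9 = 32 << 9 = 16384, i.e. all 32 spectrum bits
// mismatching, expressed in Q9.
float QualityFromProbability(int32_t last_delay_probability) {
  float quality = (float) (16384 - last_delay_probability) / 16384;
  return (quality < 0) ? 0 : quality;
}

// For example, last_delay_probability = 9181 gives
// (16384 - 9181) / 16384 = 7203 / 16384, roughly 0.44 -- the same
// 7203.f / kMaxBitCountsQ9 value expected by CorrectLastDelay in the
// unittest below.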
diff --git a/chromium/third_party/webrtc/modules/audio_processing/utility/delay_estimator_unittest.cc b/chromium/third_party/webrtc/modules/audio_processing/utility/delay_estimator_unittest.cc
index bdc199cafbb..ca0901d6db2 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/utility/delay_estimator_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/utility/delay_estimator_unittest.cc
@@ -26,6 +26,9 @@ enum { kLookahead = 10 };
// Length of binary spectrum sequence.
enum { kSequenceLength = 400 };
+const int kEnable[] = { 0, 1 };
+const size_t kSizeEnable = sizeof(kEnable) / sizeof(*kEnable);
+
class DelayEstimatorTest : public ::testing::Test {
protected:
DelayEstimatorTest();
@@ -38,7 +41,8 @@ class DelayEstimatorTest : public ::testing::Test {
void RunBinarySpectra(BinaryDelayEstimator* binary1,
BinaryDelayEstimator* binary2,
int near_offset, int lookahead_offset, int far_offset);
- void RunBinarySpectraTest(int near_offset, int lookahead_offset);
+ void RunBinarySpectraTest(int near_offset, int lookahead_offset,
+ int ref_robust_validation, int robust_validation);
void* handle_;
DelayEstimator* self_;
@@ -113,7 +117,7 @@ void DelayEstimatorTest::Init() {
EXPECT_EQ(0, farend_self_->far_spectrum_initialized);
EXPECT_EQ(0, self_->near_spectrum_initialized);
EXPECT_EQ(-2, WebRtc_last_delay(handle_)); // Delay in initial state.
- EXPECT_EQ(0, WebRtc_last_delay_quality(handle_)); // Zero quality.
+ EXPECT_FLOAT_EQ(0, WebRtc_last_delay_quality(handle_)); // Zero quality.
}
void DelayEstimatorTest::InitBinary() {
@@ -143,6 +147,8 @@ void DelayEstimatorTest::RunBinarySpectra(BinaryDelayEstimator* binary1,
int near_offset,
int lookahead_offset,
int far_offset) {
+ int different_validations = binary1->robust_validation_enabled ^
+ binary2->robust_validation_enabled;
WebRtc_InitBinaryDelayEstimatorFarend(binary_farend_);
WebRtc_InitBinaryDelayEstimator(binary1);
WebRtc_InitBinaryDelayEstimator(binary2);
@@ -167,19 +173,32 @@ void DelayEstimatorTest::RunBinarySpectra(BinaryDelayEstimator* binary1,
if ((delay_1 != -2) && (delay_2 != -2)) {
EXPECT_EQ(delay_1, delay_2 - lookahead_offset - near_offset);
}
+ // For identical signals, |delay_1| and |delay_2| should match at all
+ // times, unless one of them has robust validation turned on. In that
+ // case robust validation leaves the initial state faster.
if ((near_offset == 0) && (lookahead_offset == 0)) {
- EXPECT_EQ(delay_1, delay_2);
+ if (!different_validations) {
+ EXPECT_EQ(delay_1, delay_2);
+ } else {
+ if (binary1->robust_validation_enabled) {
+ EXPECT_GE(delay_1, delay_2);
+ } else {
+ EXPECT_GE(delay_2, delay_1);
+ }
+ }
}
}
// Verify that we have left the initialized state.
EXPECT_NE(-2, WebRtc_binary_last_delay(binary1));
- EXPECT_NE(0, WebRtc_binary_last_delay_quality(binary1));
+ EXPECT_LT(0, WebRtc_binary_last_delay_quality(binary1));
EXPECT_NE(-2, WebRtc_binary_last_delay(binary2));
- EXPECT_NE(0, WebRtc_binary_last_delay_quality(binary2));
+ EXPECT_LT(0, WebRtc_binary_last_delay_quality(binary2));
}
void DelayEstimatorTest::RunBinarySpectraTest(int near_offset,
- int lookahead_offset) {
+ int lookahead_offset,
+ int ref_robust_validation,
+ int robust_validation) {
BinaryDelayEstimator* binary2 =
WebRtc_CreateBinaryDelayEstimator(binary_farend_,
kLookahead + lookahead_offset);
@@ -187,6 +206,8 @@ void DelayEstimatorTest::RunBinarySpectraTest(int near_offset,
// the delay is equivalent with a positive |offset| of the far-end sequence.
// For non-causal systems the delay is equivalent with a negative |offset| of
// the far-end sequence.
+ binary_->robust_validation_enabled = ref_robust_validation;
+ binary2->robust_validation_enabled = robust_validation;
for (int offset = -kLookahead;
offset < kMaxDelay - lookahead_offset - near_offset;
offset++) {
@@ -194,6 +215,7 @@ void DelayEstimatorTest::RunBinarySpectraTest(int near_offset,
}
WebRtc_FreeBinaryDelayEstimator(binary2);
binary2 = NULL;
+ binary_->robust_validation_enabled = 0; // Reset reference.
}
TEST_F(DelayEstimatorTest, CorrectErrorReturnsOfWrapper) {
@@ -206,14 +228,12 @@ TEST_F(DelayEstimatorTest, CorrectErrorReturnsOfWrapper) {
void* handle = farend_handle_;
handle = WebRtc_CreateDelayEstimatorFarend(33, kMaxDelay + kLookahead);
EXPECT_TRUE(handle == NULL);
- handle = farend_handle_;
handle = WebRtc_CreateDelayEstimatorFarend(kSpectrumSize, 1);
EXPECT_TRUE(handle == NULL);
handle = handle_;
handle = WebRtc_CreateDelayEstimator(NULL, kLookahead);
EXPECT_TRUE(handle == NULL);
- handle = handle_;
handle = WebRtc_CreateDelayEstimator(farend_handle_, -1);
EXPECT_TRUE(handle == NULL);
@@ -246,10 +266,18 @@ TEST_F(DelayEstimatorTest, CorrectErrorReturnsOfWrapper) {
EXPECT_EQ(-1, WebRtc_AddFarSpectrumFix(farend_handle_, far_u16_,
spectrum_size_, 16));
+ // WebRtc_set_allowed_offset() should return -1 if we have:
+ // 1) NULL pointer as |handle|.
+ // 2) |allowed_offset| < 0.
+ EXPECT_EQ(-1, WebRtc_set_allowed_offset(NULL, 0));
+ EXPECT_EQ(-1, WebRtc_set_allowed_offset(handle_, -1));
+
+ EXPECT_EQ(-1, WebRtc_get_allowed_offset(NULL));
+
// WebRtc_enable_robust_validation() should return -1 if we have:
// 1) NULL pointer as |handle|.
// 2) Incorrect |enable| value (not 0 or 1).
- EXPECT_EQ(-1, WebRtc_enable_robust_validation(NULL, 0));
+ EXPECT_EQ(-1, WebRtc_enable_robust_validation(NULL, kEnable[0]));
EXPECT_EQ(-1, WebRtc_enable_robust_validation(handle_, -1));
EXPECT_EQ(-1, WebRtc_enable_robust_validation(handle_, 2));
@@ -286,21 +314,31 @@ TEST_F(DelayEstimatorTest, CorrectErrorReturnsOfWrapper) {
// WebRtc_last_delay() should return -1 if we have a NULL pointer as |handle|.
EXPECT_EQ(-1, WebRtc_last_delay(NULL));
- // WebRtc_last_delay_quality() should return -1 if we have a NULL pointer as
- // |handle|.
- EXPECT_EQ(-1, WebRtc_last_delay_quality(NULL));
-
// Free any local memory if needed.
WebRtc_FreeDelayEstimator(handle);
}
+TEST_F(DelayEstimatorTest, VerifyAllowedOffset) {
+ // Is set to zero by default.
+ EXPECT_EQ(0, WebRtc_get_allowed_offset(handle_));
+ for (int i = 1; i >= 0; i--) {
+ EXPECT_EQ(0, WebRtc_set_allowed_offset(handle_, i));
+ EXPECT_EQ(i, WebRtc_get_allowed_offset(handle_));
+ Init();
+ // Unaffected over a reset.
+ EXPECT_EQ(i, WebRtc_get_allowed_offset(handle_));
+ }
+}
+
TEST_F(DelayEstimatorTest, VerifyEnableRobustValidation) {
- Init();
// Disabled by default.
EXPECT_EQ(0, WebRtc_is_robust_validation_enabled(handle_));
- for (int i = 1; i >= 0; i--) {
- EXPECT_EQ(0, WebRtc_enable_robust_validation(handle_, i));
- EXPECT_EQ(i, WebRtc_is_robust_validation_enabled(handle_));
+ for (size_t i = 0; i < kSizeEnable; ++i) {
+ EXPECT_EQ(0, WebRtc_enable_robust_validation(handle_, kEnable[i]));
+ EXPECT_EQ(kEnable[i], WebRtc_is_robust_validation_enabled(handle_));
+ Init();
+ // Unaffected over a reset.
+ EXPECT_EQ(kEnable[i], WebRtc_is_robust_validation_enabled(handle_));
}
}
@@ -335,6 +373,7 @@ TEST_F(DelayEstimatorTest, CorrectLastDelay) {
// (|last_delay| = -2). Then we compare the Process() output with the
// last_delay() call.
+ // TODO(bjornv): Update quality values for robust validation.
int last_delay = 0;
// Floating point operations.
Init();
@@ -345,13 +384,16 @@ TEST_F(DelayEstimatorTest, CorrectLastDelay) {
spectrum_size_);
if (last_delay != -2) {
EXPECT_EQ(last_delay, WebRtc_last_delay(handle_));
- EXPECT_EQ(7203, WebRtc_last_delay_quality(handle_));
+ if (!WebRtc_is_robust_validation_enabled(handle_)) {
+ EXPECT_FLOAT_EQ(7203.f / kMaxBitCountsQ9,
+ WebRtc_last_delay_quality(handle_));
+ }
break;
}
}
// Verify that we have left the initialized state.
EXPECT_NE(-2, WebRtc_last_delay(handle_));
- EXPECT_NE(0, WebRtc_last_delay_quality(handle_));
+ EXPECT_LT(0, WebRtc_last_delay_quality(handle_));
// Fixed point operations.
Init();
@@ -362,13 +404,16 @@ TEST_F(DelayEstimatorTest, CorrectLastDelay) {
spectrum_size_, 0);
if (last_delay != -2) {
EXPECT_EQ(last_delay, WebRtc_last_delay(handle_));
- EXPECT_EQ(7203, WebRtc_last_delay_quality(handle_));
+ if (!WebRtc_is_robust_validation_enabled(handle_)) {
+ EXPECT_FLOAT_EQ(7203.f / kMaxBitCountsQ9,
+ WebRtc_last_delay_quality(handle_));
+ }
break;
}
}
// Verify that we have left the initialized state.
EXPECT_NE(-2, WebRtc_last_delay(handle_));
- EXPECT_NE(0, WebRtc_last_delay_quality(handle_));
+ EXPECT_LT(0, WebRtc_last_delay_quality(handle_));
}
TEST_F(DelayEstimatorTest, CorrectErrorReturnsOfBinaryEstimatorFarend) {
@@ -391,18 +436,14 @@ TEST_F(DelayEstimatorTest, CorrectErrorReturnsOfBinaryEstimator) {
BinaryDelayEstimator* binary_handle = binary_;
// WebRtc_CreateBinaryDelayEstimator() should return -1 if we have a NULL
- // pointer as |binary_handle| or invalid input values. Upon failure, the
+ // pointer as |binary_farend| or invalid input values. Upon failure, the
// |binary_handle| should be NULL.
// Make sure we have a non-NULL value at start, so we can detect NULL after
// create failure.
binary_handle = WebRtc_CreateBinaryDelayEstimator(NULL, kLookahead);
EXPECT_TRUE(binary_handle == NULL);
- binary_handle = binary_;
binary_handle = WebRtc_CreateBinaryDelayEstimator(binary_farend_, -1);
EXPECT_TRUE(binary_handle == NULL);
- binary_handle = binary_;
- binary_handle = WebRtc_CreateBinaryDelayEstimator(0, 0);
- EXPECT_TRUE(binary_handle == NULL);
}
TEST_F(DelayEstimatorTest, MeanEstimatorFix) {
@@ -431,26 +472,70 @@ TEST_F(DelayEstimatorTest, ExactDelayEstimateMultipleNearSameSpectrum) {
// the signal accordingly. We create two Binary Delay Estimators and feed them
// with the same signals, so they should output the same results.
// We verify both causal and non-causal delays.
+ // For these noise-free signals, the robust validation should not have an
+ // impact, hence we turn robust validation on/off for both the reference
+ // and the delayed near-end.
- RunBinarySpectraTest(0, 0);
+ for (size_t i = 0; i < kSizeEnable; ++i) {
+ for (size_t j = 0; j < kSizeEnable; ++j) {
+ RunBinarySpectraTest(0, 0, kEnable[i], kEnable[j]);
+ }
+ }
}
TEST_F(DelayEstimatorTest, ExactDelayEstimateMultipleNearDifferentSpectrum) {
// In this test we use the same setup as above, but we now feed the two Binary
// Delay Estimators with different signals, so they should output different
// results.
+ // For these noise-free signals, the robust validation should not have an
+ // impact, hence we turn robust validation on/off for both the reference
+ // and the delayed near-end.
const int kNearOffset = 1;
- RunBinarySpectraTest(kNearOffset, 0);
+ for (size_t i = 0; i < kSizeEnable; ++i) {
+ for (size_t j = 0; j < kSizeEnable; ++j) {
+ RunBinarySpectraTest(kNearOffset, 0, kEnable[i], kEnable[j]);
+ }
+ }
}
TEST_F(DelayEstimatorTest, ExactDelayEstimateMultipleNearDifferentLookahead) {
// In this test we use the same setup as above, feeding the two Binary
// Delay Estimators with the same signals. The difference is that we create
// them with different lookahead.
+ // For these noise-free signals, the robust validation should not have an
+ // impact, hence we turn robust validation on/off for both the reference
+ // and the delayed near-end.
const int kLookaheadOffset = 1;
- RunBinarySpectraTest(0, kLookaheadOffset);
+ for (size_t i = 0; i < kSizeEnable; ++i) {
+ for (size_t j = 0; j < kSizeEnable; ++j) {
+ RunBinarySpectraTest(0, kLookaheadOffset, kEnable[i], kEnable[j]);
+ }
+ }
}
+TEST_F(DelayEstimatorTest, AllowedOffsetNoImpactWhenRobustValidationDisabled) {
+ // The same setup as in ExactDelayEstimateMultipleNearSameSpectrum with the
+ // difference that |allowed_offset| is set for the reference binary delay
+ // estimator.
+
+ binary_->allowed_offset = 10;
+ RunBinarySpectraTest(0, 0, 0, 0);
+ binary_->allowed_offset = 0; // Reset reference.
+}
+
+TEST_F(DelayEstimatorTest, VerifyLookaheadAtCreate) {
+ void* farend_handle = WebRtc_CreateDelayEstimatorFarend(kSpectrumSize,
+ kMaxDelay);
+ ASSERT_TRUE(farend_handle != NULL);
+ void* handle = WebRtc_CreateDelayEstimator(farend_handle, kLookahead);
+ ASSERT_TRUE(handle != NULL);
+ EXPECT_EQ(kLookahead, WebRtc_lookahead(handle));
+ WebRtc_FreeDelayEstimator(handle);
+ WebRtc_FreeDelayEstimatorFarend(farend_handle);
+}
+
+// TODO(bjornv): Add tests for SoftReset...(...).
+
} // namespace
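Regarding the TODO above on SoftReset coverage, a test in this file's style could look like the sketch below (hypothetical; it assumes |handle_| was created with kLookahead, as in the rest of the fixture):

TEST_F(DelayEstimatorTest, SoftResetAdjustsLookahead) {
  // A positive |delay_shift| is absorbed by the lookahead, and the return
  // value is the number of shifts actually performed.
  EXPECT_EQ(kLookahead, WebRtc_lookahead(handle_));
  EXPECT_EQ(2, WebRtc_SoftResetDelayEstimator(handle_, 2));
  EXPECT_EQ(kLookahead - 2, WebRtc_lookahead(handle_));
  // Shifting far beyond the available lookahead is clamped at zero.
  EXPECT_EQ(kLookahead - 2,
            WebRtc_SoftResetDelayEstimator(handle_, 2 * kLookahead));
  EXPECT_EQ(0, WebRtc_lookahead(handle_));
}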
diff --git a/chromium/third_party/webrtc/modules/audio_processing/utility/delay_estimator_wrapper.c b/chromium/third_party/webrtc/modules/audio_processing/utility/delay_estimator_wrapper.c
index ce4431844ee..6ec894e65e9 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/utility/delay_estimator_wrapper.c
+++ b/chromium/third_party/webrtc/modules/audio_processing/utility/delay_estimator_wrapper.c
@@ -191,6 +191,12 @@ int WebRtc_InitDelayEstimatorFarend(void* handle) {
return 0;
}
+void WebRtc_SoftResetDelayEstimatorFarend(void* handle, int delay_shift) {
+ DelayEstimatorFarend* self = (DelayEstimatorFarend*) handle;
+ assert(self != NULL);
+ WebRtc_SoftResetBinaryDelayEstimatorFarend(self->binary_farend, delay_shift);
+}
+
int WebRtc_AddFarSpectrumFix(void* handle, uint16_t* far_spectrum,
int spectrum_size, int far_q) {
DelayEstimatorFarend* self = (DelayEstimatorFarend*) handle;
@@ -261,7 +267,7 @@ void WebRtc_FreeDelayEstimator(void* handle) {
free(self);
}
-void* WebRtc_CreateDelayEstimator(void* farend_handle, int lookahead) {
+void* WebRtc_CreateDelayEstimator(void* farend_handle, int max_lookahead) {
DelayEstimator* self = NULL;
DelayEstimatorFarend* farend = (DelayEstimatorFarend*) farend_handle;
@@ -274,7 +280,7 @@ void* WebRtc_CreateDelayEstimator(void* farend_handle, int lookahead) {
// Allocate memory for the farend spectrum handling.
self->binary_handle =
- WebRtc_CreateBinaryDelayEstimator(farend->binary_farend, lookahead);
+ WebRtc_CreateBinaryDelayEstimator(farend->binary_farend, max_lookahead);
memory_fail |= (self->binary_handle == NULL);
// Allocate memory for spectrum buffers.
@@ -312,6 +318,50 @@ int WebRtc_InitDelayEstimator(void* handle) {
return 0;
}
+int WebRtc_SoftResetDelayEstimator(void* handle, int delay_shift) {
+ DelayEstimator* self = (DelayEstimator*) handle;
+ assert(self != NULL);
+ return WebRtc_SoftResetBinaryDelayEstimator(self->binary_handle, delay_shift);
+}
+
+int WebRtc_set_lookahead(void* handle, int lookahead) {
+ DelayEstimator* self = (DelayEstimator*) handle;
+ assert(self != NULL);
+ assert(self->binary_handle != NULL);
+ if ((lookahead > self->binary_handle->near_history_size - 1) ||
+ (lookahead < 0)) {
+ return -1;
+ }
+ self->binary_handle->lookahead = lookahead;
+ return self->binary_handle->lookahead;
+}
+
+int WebRtc_lookahead(void* handle) {
+ DelayEstimator* self = (DelayEstimator*) handle;
+ assert(self != NULL);
+ assert(self->binary_handle != NULL);
+ return self->binary_handle->lookahead;
+}
+
+int WebRtc_set_allowed_offset(void* handle, int allowed_offset) {
+ DelayEstimator* self = (DelayEstimator*) handle;
+
+ if ((self == NULL) || (allowed_offset < 0)) {
+ return -1;
+ }
+ self->binary_handle->allowed_offset = allowed_offset;
+ return 0;
+}
+
+int WebRtc_get_allowed_offset(const void* handle) {
+ const DelayEstimator* self = (const DelayEstimator*) handle;
+
+ if (self == NULL) {
+ return -1;
+ }
+ return self->binary_handle->allowed_offset;
+}
+
int WebRtc_enable_robust_validation(void* handle, int enable) {
DelayEstimator* self = (DelayEstimator*) handle;
@@ -326,13 +376,12 @@ int WebRtc_enable_robust_validation(void* handle, int enable) {
return 0;
}
-int WebRtc_is_robust_validation_enabled(void* handle) {
- DelayEstimator* self = (DelayEstimator*) handle;
+int WebRtc_is_robust_validation_enabled(const void* handle) {
+ const DelayEstimator* self = (const DelayEstimator*) handle;
if (self == NULL) {
return -1;
}
- assert(self->binary_handle != NULL);
return self->binary_handle->robust_validation_enabled;
}
@@ -403,12 +452,8 @@ int WebRtc_last_delay(void* handle) {
return WebRtc_binary_last_delay(self->binary_handle);
}
-int WebRtc_last_delay_quality(void* handle) {
+float WebRtc_last_delay_quality(void* handle) {
DelayEstimator* self = (DelayEstimator*) handle;
-
- if (self == NULL) {
- return -1;
- }
-
+ assert(self != NULL);
return WebRtc_binary_last_delay_quality(self->binary_handle);
}
diff --git a/chromium/third_party/webrtc/modules/audio_processing/utility/delay_estimator_wrapper.h b/chromium/third_party/webrtc/modules/audio_processing/utility/delay_estimator_wrapper.h
index 50bcddeddc2..13e86bdd438 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/utility/delay_estimator_wrapper.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/utility/delay_estimator_wrapper.h
@@ -52,6 +52,13 @@ void* WebRtc_CreateDelayEstimatorFarend(int spectrum_size, int history_size);
//
int WebRtc_InitDelayEstimatorFarend(void* handle);
+// Soft resets the far-end part of the delay estimation instance returned by
+// WebRtc_CreateDelayEstimatorFarend(...).
+// Input:
+// - delay_shift : The number of blocks to shift the history buffers.
+//
+void WebRtc_SoftResetDelayEstimatorFarend(void* handle, int delay_shift);
+
// Adds the far-end spectrum to the far-end history buffer. This spectrum is
// used as reference when calculating the delay using
// WebRtc_ProcessSpectrum().
@@ -91,11 +98,18 @@ void WebRtc_FreeDelayEstimator(void* handle);
// ownership of |farend_handle|, which has to be torn
// down properly after this instance.
//
-// - lookahead : Amount of non-causal lookahead to use. This can
-// detect cases in which a near-end signal occurs before
-// the corresponding far-end signal. It will delay the
-// estimate for the current block by an equal amount,
-// and the returned values will be offset by it.
+// - max_lookahead : Maximum amount of non-causal lookahead allowed. The
+// actual amount of lookahead used can be controlled by
+// WebRtc_set_lookahead(...). The default |lookahead| is
+// set to |max_lookahead| at create time. Use
+// WebRtc_set_lookahead(...) before start if a different
+// value is desired.
+//
+// Using lookahead can detect cases in which a near-end
+// signal occurs before the corresponding far-end signal.
+// It will delay the estimate for the current block by an
+// equal amount, and the returned values will be offset
+// by it.
//
// A value of zero is the typical no-lookahead case.
// This also represents the minimum delay which can be
@@ -111,7 +125,7 @@ void WebRtc_FreeDelayEstimator(void* handle);
// if any of the input parameters are invalid NULL is
// returned.
//
-void* WebRtc_CreateDelayEstimator(void* farend_handle, int lookahead);
+void* WebRtc_CreateDelayEstimator(void* farend_handle, int max_lookahead);
// Initializes the delay estimation instance returned by
// WebRtc_CreateDelayEstimator(...)
@@ -123,17 +137,59 @@ void* WebRtc_CreateDelayEstimator(void* farend_handle, int lookahead);
//
int WebRtc_InitDelayEstimator(void* handle);
+// Soft resets the delay estimation instance returned by
+// WebRtc_CreateDelayEstimator(...)
+// Input:
+// - delay_shift : The number of blocks to shift the history buffers.
+//
+// Return value:
+// - actual_shifts : The actual number of shifts performed.
+//
+int WebRtc_SoftResetDelayEstimator(void* handle, int delay_shift);
+
+// Sets the amount of |lookahead| to use. Valid values are [0, max_lookahead]
+// where |max_lookahead| was set at create time through
+// WebRtc_CreateDelayEstimator(...).
+//
+// Input:
+// - lookahead : The amount of lookahead, in blocks, to use.
+//
+// Return value:
+// - new_lookahead : The lookahead set, or -1 if |lookahead| is out of range.
+//
+int WebRtc_set_lookahead(void* handle, int lookahead);
+
+// Returns the amount of lookahead we currently use.
+int WebRtc_lookahead(void* handle);
+
+// Sets the |allowed_offset| used in the robust validation scheme. If the
+// delay estimator is used in an echo control component, this parameter is
+// related to the filter length. In principle |allowed_offset| should be set to
+// the echo control filter length minus the expected echo duration, i.e., the
+// delay offset the echo control can handle without quality regression. The
+// default value, used if not set manually, is zero. Note that |allowed_offset|
+// has to be non-negative.
+// Inputs:
+// - handle : Pointer to the delay estimation instance.
+// - allowed_offset : The amount of delay offset, measured in partitions,
+// the echo control filter can handle.
+int WebRtc_set_allowed_offset(void* handle, int allowed_offset);
+
+// Returns the |allowed_offset| in number of partitions.
+int WebRtc_get_allowed_offset(const void* handle);
+
// TODO(bjornv): Implement this functionality. Currently, enabling it has no
// impact, hence this is an empty API.
// Enables/Disables a robust validation functionality in the delay estimation.
-// This is by default disabled upon initialization.
+// This is by default set to disabled at create time. The state is preserved
+// over a reset.
// Inputs:
// - handle : Pointer to the delay estimation instance.
// - enable : Enable (1) or disable (0) this feature.
int WebRtc_enable_robust_validation(void* handle, int enable);
// Returns 1 if robust validation is enabled and 0 if disabled.
-int WebRtc_is_robust_validation_enabled(void* handle);
+int WebRtc_is_robust_validation_enabled(const void* handle);
// Estimates and returns the delay between the far-end and near-end blocks. The
// value will be offset by the lookahead (i.e. the lookahead should be
@@ -179,18 +235,11 @@ int WebRtc_last_delay(void* handle);
// Returns the estimation quality/probability of the last calculated delay
// updated by the function WebRtc_DelayEstimatorProcess(...). The estimation
-// quality is a value in the interval [0, 1] in Q9. The higher the value, the
-// better quality.
-//
-// Input:
-// - handle : Pointer to the delay estimation instance.
+// quality is a value in the interval [0, 1]. The higher the value, the better
+// the quality.
//
// Return value:
-// - delay_quality : >= 0 - Estimation quality (in Q9) of last calculated
-// delay value.
-// -1 - Error.
-// -2 - Insufficient data for estimation.
-//
-int WebRtc_last_delay_quality(void* handle);
+// - delay_quality : >= 0 - Estimation quality of last calculated delay.
+float WebRtc_last_delay_quality(void* handle);
#endif // WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_WRAPPER_H_
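Putting the new calls together, a hypothetical fixed-point caller of this header could look like the sketch below. The constants, the Q0 spectra, and the 0.5f quality gate are illustrative; the calls themselves are the ones declared above:

#include "webrtc/modules/audio_processing/utility/delay_estimator_wrapper.h"

void DelayEstimationSketch(void) {
  enum { kSpectrumSize = 65 };   // As in the unittest for this module.
  const int kHistorySize = 100;  // Maximum delay, in blocks (illustrative).
  const int kMaxLookahead = 10;

  void* farend = WebRtc_CreateDelayEstimatorFarend(kSpectrumSize,
                                                   kHistorySize);
  void* estimator = WebRtc_CreateDelayEstimator(farend, kMaxLookahead);
  WebRtc_InitDelayEstimatorFarend(farend);
  WebRtc_InitDelayEstimator(estimator);

  // Tune the new knobs before processing: use half the created lookahead,
  // accept candidates within 16 partitions of |last_delay|, and enable
  // robust validation. All three settings are preserved over a re-init.
  WebRtc_set_lookahead(estimator, kMaxLookahead / 2);
  WebRtc_set_allowed_offset(estimator, 16);
  WebRtc_enable_robust_validation(estimator, 1);

  uint16_t far_spectrum[kSpectrumSize] = { 0 };
  uint16_t near_spectrum[kSpectrumSize] = { 0 };
  for (int block = 0; block < 400; ++block) {
    // Fill the spectra from the render/capture paths (omitted).
    WebRtc_AddFarSpectrumFix(farend, far_spectrum, kSpectrumSize, 0);
    int delay = WebRtc_DelayEstimatorProcessFix(estimator, near_spectrum,
                                                kSpectrumSize, 0);
    if (delay >= 0 && WebRtc_last_delay_quality(estimator) > 0.5f) {
      // Trust |delay|; it equals WebRtc_last_delay(estimator).
    }
  }

  // On a known jump in stream alignment, shift the histories instead of
  // doing a full re-init.
  WebRtc_SoftResetDelayEstimatorFarend(farend, 3);
  WebRtc_SoftResetDelayEstimator(estimator, 3);

  WebRtc_FreeDelayEstimator(estimator);
  WebRtc_FreeDelayEstimatorFarend(farend);
}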
diff --git a/chromium/third_party/webrtc/modules/audio_processing/utility/ring_buffer_unittest.cc b/chromium/third_party/webrtc/modules/audio_processing/utility/ring_buffer_unittest.cc
index 2b7634dd073..5dacf0b804c 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/utility/ring_buffer_unittest.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/utility/ring_buffer_unittest.cc
@@ -22,7 +22,12 @@ extern "C" {
namespace webrtc {
-typedef scoped_ptr_malloc<RingBuffer, WebRtc_FreeBuffer> scoped_ring_buffer;
+struct FreeBufferDeleter {
+ inline void operator()(void* ptr) const {
+ WebRtc_FreeBuffer(ptr);
+ }
+};
+typedef scoped_ptr<RingBuffer, FreeBufferDeleter> scoped_ring_buffer;
static void AssertElementEq(int expected, int actual) {
ASSERT_EQ(expected, actual);
@@ -56,8 +61,8 @@ static void RandomStressTest(int** data_ptr) {
srand(seed);
for (int i = 0; i < kNumTests; i++) {
const int buffer_size = std::max(rand() % kMaxBufferSize, 1);
- scoped_array<int> write_data(new int[buffer_size]);
- scoped_array<int> read_data(new int[buffer_size]);
+ scoped_ptr<int[]> write_data(new int[buffer_size]);
+ scoped_ptr<int[]> read_data(new int[buffer_size]);
scoped_ring_buffer buffer(WebRtc_CreateBuffer(buffer_size, sizeof(int)));
ASSERT_TRUE(buffer.get() != NULL);
ASSERT_EQ(0, WebRtc_InitBuffer(buffer.get()));
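For comparison, the same deleter pattern expressed with C++11 std::unique_ptr (a sketch; webrtc::scoped_ptr above mirrors this interface, and the ring-buffer header path follows this file):

#include <memory>

extern "C" {
#include "webrtc/modules/audio_processing/utility/ring_buffer.h"
}

struct UniqueFreeBufferDeleter {
  void operator()(void* ptr) const { WebRtc_FreeBuffer(ptr); }
};
typedef std::unique_ptr<RingBuffer, UniqueFreeBufferDeleter>
    unique_ring_buffer;

void Example() {
  unique_ring_buffer buffer(WebRtc_CreateBuffer(128, sizeof(int)));
  WebRtc_InitBuffer(buffer.get());
  // WebRtc_FreeBuffer() runs automatically when |buffer| leaves scope.
}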
diff --git a/chromium/third_party/webrtc/modules/audio_processing/voice_detection_impl.cc b/chromium/third_party/webrtc/modules/audio_processing/voice_detection_impl.cc
index d41547c8022..c6e497ffa3e 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/voice_detection_impl.cc
+++ b/chromium/third_party/webrtc/modules/audio_processing/voice_detection_impl.cc
@@ -13,10 +13,8 @@
#include <assert.h>
#include "webrtc/common_audio/vad/include/webrtc_vad.h"
-#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
-
#include "webrtc/modules/audio_processing/audio_buffer.h"
-#include "webrtc/modules/audio_processing/audio_processing_impl.h"
+#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
namespace webrtc {
@@ -39,9 +37,11 @@ int MapSetting(VoiceDetection::Likelihood likelihood) {
}
} // namespace
-VoiceDetectionImpl::VoiceDetectionImpl(const AudioProcessingImpl* apm)
- : ProcessingComponent(apm),
+VoiceDetectionImpl::VoiceDetectionImpl(const AudioProcessing* apm,
+ CriticalSectionWrapper* crit)
+ : ProcessingComponent(),
apm_(apm),
+ crit_(crit),
stream_has_voice_(false),
using_external_vad_(false),
likelihood_(kLowLikelihood),
@@ -61,7 +61,7 @@ int VoiceDetectionImpl::ProcessCaptureAudio(AudioBuffer* audio) {
}
assert(audio->samples_per_split_channel() <= 160);
- int16_t* mixed_data = audio->low_pass_split_data(0);
+ const int16_t* mixed_data = audio->low_pass_split_data(0);
if (audio->num_channels() > 1) {
audio->CopyAndMixLowPass(1);
mixed_data = audio->mixed_low_pass_data(0);
@@ -70,7 +70,7 @@ int VoiceDetectionImpl::ProcessCaptureAudio(AudioBuffer* audio) {
// TODO(ajm): concatenate data in frame buffer here.
int vad_ret = WebRtcVad_Process(static_cast<Handle*>(handle(0)),
- apm_->split_sample_rate_hz(),
+ apm_->proc_split_sample_rate_hz(),
mixed_data,
frame_size_samples_);
if (vad_ret == 0) {
@@ -87,7 +87,7 @@ int VoiceDetectionImpl::ProcessCaptureAudio(AudioBuffer* audio) {
}
int VoiceDetectionImpl::Enable(bool enable) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
return EnableComponent(enable);
}
@@ -108,7 +108,7 @@ bool VoiceDetectionImpl::stream_has_voice() const {
}
int VoiceDetectionImpl::set_likelihood(VoiceDetection::Likelihood likelihood) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
if (MapSetting(likelihood) == -1) {
return apm_->kBadParameterError;
}
@@ -122,7 +122,7 @@ VoiceDetection::Likelihood VoiceDetectionImpl::likelihood() const {
}
int VoiceDetectionImpl::set_frame_size_ms(int size) {
- CriticalSectionScoped crit_scoped(apm_->crit());
+ CriticalSectionScoped crit_scoped(crit_);
assert(size == 10); // TODO(ajm): remove when supported.
if (size != 10 &&
size != 20 &&
@@ -146,7 +146,8 @@ int VoiceDetectionImpl::Initialize() {
}
using_external_vad_ = false;
- frame_size_samples_ = frame_size_ms_ * (apm_->split_sample_rate_hz() / 1000);
+ frame_size_samples_ = frame_size_ms_ *
+ apm_->proc_split_sample_rate_hz() / 1000;
// TODO(ajm): intialize frame buffer here.
return apm_->kNoError;
@@ -163,8 +164,8 @@ void* VoiceDetectionImpl::CreateHandle() const {
return handle;
}
-int VoiceDetectionImpl::DestroyHandle(void* handle) const {
- return WebRtcVad_Free(static_cast<Handle*>(handle));
+void VoiceDetectionImpl::DestroyHandle(void* handle) const {
+ WebRtcVad_Free(static_cast<Handle*>(handle));
}
int VoiceDetectionImpl::InitializeHandle(void* handle) const {
diff --git a/chromium/third_party/webrtc/modules/audio_processing/voice_detection_impl.h b/chromium/third_party/webrtc/modules/audio_processing/voice_detection_impl.h
index f8f50e8493c..1dfdf20ae92 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/voice_detection_impl.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/voice_detection_impl.h
@@ -15,13 +15,14 @@
#include "webrtc/modules/audio_processing/processing_component.h"
namespace webrtc {
-class AudioProcessingImpl;
+
class AudioBuffer;
+class CriticalSectionWrapper;
class VoiceDetectionImpl : public VoiceDetection,
public ProcessingComponent {
public:
- explicit VoiceDetectionImpl(const AudioProcessingImpl* apm);
+ VoiceDetectionImpl(const AudioProcessing* apm, CriticalSectionWrapper* crit);
virtual ~VoiceDetectionImpl();
int ProcessCaptureAudio(AudioBuffer* audio);
@@ -46,11 +47,12 @@ class VoiceDetectionImpl : public VoiceDetection,
virtual void* CreateHandle() const OVERRIDE;
virtual int InitializeHandle(void* handle) const OVERRIDE;
virtual int ConfigureHandle(void* handle) const OVERRIDE;
- virtual int DestroyHandle(void* handle) const OVERRIDE;
+ virtual void DestroyHandle(void* handle) const OVERRIDE;
virtual int num_handles_required() const OVERRIDE;
virtual int GetHandleError(void* handle) const OVERRIDE;
- const AudioProcessingImpl* apm_;
+ const AudioProcessing* apm_;
+ CriticalSectionWrapper* crit_;
bool stream_has_voice_;
bool using_external_vad_;
Likelihood likelihood_;