summary refs log tree commit diff stats
path: root/chromium/third_party/webrtc/modules/audio_processing/audio_buffer.h
diff options
context:
space:
mode:
Diffstat (limited to 'chromium/third_party/webrtc/modules/audio_processing/audio_buffer.h')
-rw-r--r-- chromium/third_party/webrtc/modules/audio_processing/audio_buffer.h | 117
1 files changed, 83 insertions, 34 deletions
diff --git a/chromium/third_party/webrtc/modules/audio_processing/audio_buffer.h b/chromium/third_party/webrtc/modules/audio_processing/audio_buffer.h
index 2638bef6058..67e4f485043 100644
--- a/chromium/third_party/webrtc/modules/audio_processing/audio_buffer.h
+++ b/chromium/third_party/webrtc/modules/audio_processing/audio_buffer.h
@@ -8,75 +8,124 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_AUDIO_BUFFER_H_
-#define WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_AUDIO_BUFFER_H_
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AUDIO_BUFFER_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_AUDIO_BUFFER_H_
+#include <vector>
+
+#include "webrtc/modules/audio_processing/common.h"
+#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
+#include "webrtc/system_wrappers/interface/scoped_vector.h"
#include "webrtc/typedefs.h"
namespace webrtc {
-struct AudioChannel;
-struct SplitAudioChannel;
+class PushSincResampler;
+class SplitChannelBuffer;
+class IFChannelBuffer;
+
+struct SplitFilterStates {
+ SplitFilterStates() {
+ memset(analysis_filter_state1, 0, sizeof(analysis_filter_state1));
+ memset(analysis_filter_state2, 0, sizeof(analysis_filter_state2));
+ memset(synthesis_filter_state1, 0, sizeof(synthesis_filter_state1));
+ memset(synthesis_filter_state2, 0, sizeof(synthesis_filter_state2));
+ }
+
+ static const int kStateSize = 6;
+ int analysis_filter_state1[kStateSize];
+ int analysis_filter_state2[kStateSize];
+ int synthesis_filter_state1[kStateSize];
+ int synthesis_filter_state2[kStateSize];
+};
class AudioBuffer {
public:
- AudioBuffer(int max_num_channels, int samples_per_channel);
+ // TODO(ajm): Switch to take ChannelLayouts.
+ AudioBuffer(int input_samples_per_channel,
+ int num_input_channels,
+ int process_samples_per_channel,
+ int num_process_channels,
+ int output_samples_per_channel);
virtual ~AudioBuffer();
int num_channels() const;
int samples_per_channel() const;
int samples_per_split_channel() const;
+ int samples_per_keyboard_channel() const;
- int16_t* data(int channel) const;
- int16_t* low_pass_split_data(int channel) const;
- int16_t* high_pass_split_data(int channel) const;
- int16_t* mixed_data(int channel) const;
- int16_t* mixed_low_pass_data(int channel) const;
- int16_t* low_pass_reference(int channel) const;
+ int16_t* data(int channel);
+ const int16_t* data(int channel) const;
+ int16_t* low_pass_split_data(int channel);
+ const int16_t* low_pass_split_data(int channel) const;
+ int16_t* high_pass_split_data(int channel);
+ const int16_t* high_pass_split_data(int channel) const;
+ const int16_t* mixed_data(int channel) const;
+ const int16_t* mixed_low_pass_data(int channel) const;
+ const int16_t* low_pass_reference(int channel) const;
- int32_t* analysis_filter_state1(int channel) const;
- int32_t* analysis_filter_state2(int channel) const;
- int32_t* synthesis_filter_state1(int channel) const;
- int32_t* synthesis_filter_state2(int channel) const;
+ // Float versions of the accessors, with automatic conversion back and forth
+ // as necessary. The range of the numbers are the same as for int16_t.
+ float* data_f(int channel);
+ float* low_pass_split_data_f(int channel);
+ float* high_pass_split_data_f(int channel);
+
+ const float* keyboard_data() const;
+
+ SplitFilterStates* filter_states(int channel);
void set_activity(AudioFrame::VADActivity activity);
AudioFrame::VADActivity activity() const;
- bool is_muted() const;
-
+ // Use for int16 interleaved data.
void DeinterleaveFrom(AudioFrame* audioFrame);
void InterleaveTo(AudioFrame* audioFrame) const;
// If |data_changed| is false, only the non-audio data members will be copied
// to |frame|.
void InterleaveTo(AudioFrame* frame, bool data_changed) const;
- void Mix(int num_mixed_channels);
+
+ // Use for float deinterleaved data.
+ void CopyFrom(const float* const* data,
+ int samples_per_channel,
+ AudioProcessing::ChannelLayout layout);
+ void CopyTo(int samples_per_channel,
+ AudioProcessing::ChannelLayout layout,
+ float* const* data);
+
void CopyAndMix(int num_mixed_channels);
void CopyAndMixLowPass(int num_mixed_channels);
void CopyLowPassToReference();
private:
- const int max_num_channels_;
- int num_channels_;
+ // Called from DeinterleaveFrom() and CopyFrom().
+ void InitForNewData();
+
+ const int input_samples_per_channel_;
+ const int num_input_channels_;
+ const int proc_samples_per_channel_;
+ const int num_proc_channels_;
+ const int output_samples_per_channel_;
+ int samples_per_split_channel_;
int num_mixed_channels_;
int num_mixed_low_pass_channels_;
- // Whether the original data was replaced with mixed data.
- bool data_was_mixed_;
- const int samples_per_channel_;
- int samples_per_split_channel_;
bool reference_copied_;
AudioFrame::VADActivity activity_;
- bool is_muted_;
-
- int16_t* data_;
- scoped_array<AudioChannel> channels_;
- scoped_array<SplitAudioChannel> split_channels_;
- scoped_array<AudioChannel> mixed_channels_;
- // TODO(andrew): improve this, we don't need the full 32 kHz space here.
- scoped_array<AudioChannel> mixed_low_pass_channels_;
- scoped_array<AudioChannel> low_pass_reference_channels_;
+
+ const float* keyboard_data_;
+ scoped_ptr<IFChannelBuffer> channels_;
+ scoped_ptr<SplitChannelBuffer> split_channels_;
+ scoped_ptr<SplitFilterStates[]> filter_states_;
+ scoped_ptr<ChannelBuffer<int16_t> > mixed_channels_;
+ scoped_ptr<ChannelBuffer<int16_t> > mixed_low_pass_channels_;
+ scoped_ptr<ChannelBuffer<int16_t> > low_pass_reference_channels_;
+ scoped_ptr<ChannelBuffer<float> > input_buffer_;
+ scoped_ptr<ChannelBuffer<float> > process_buffer_;
+ ScopedVector<PushSincResampler> input_resamplers_;
+ ScopedVector<PushSincResampler> output_resamplers_;
};
+
} // namespace webrtc
-#endif // WEBRTC_MODULES_AUDIO_PROCESSING_MAIN_SOURCE_AUDIO_BUFFER_H_
+#endif // WEBRTC_MODULES_AUDIO_PROCESSING_AUDIO_BUFFER_H_