Diffstat (limited to 'chromium/content/renderer/media/webrtc_local_audio_track.cc')
-rw-r--r--  chromium/content/renderer/media/webrtc_local_audio_track.cc  275
1 file changed, 57 insertions, 218 deletions
diff --git a/chromium/content/renderer/media/webrtc_local_audio_track.cc b/chromium/content/renderer/media/webrtc_local_audio_track.cc
index 8afa06feab2..95f34f64ea3 100644
--- a/chromium/content/renderer/media/webrtc_local_audio_track.cc
+++ b/chromium/content/renderer/media/webrtc_local_audio_track.cc
@@ -5,150 +5,29 @@
#include "content/renderer/media/webrtc_local_audio_track.h"
#include "content/public/renderer/media_stream_audio_sink.h"
+#include "content/renderer/media/media_stream_audio_level_calculator.h"
+#include "content/renderer/media/media_stream_audio_processor.h"
#include "content/renderer/media/media_stream_audio_sink_owner.h"
#include "content/renderer/media/media_stream_audio_track_sink.h"
#include "content/renderer/media/peer_connection_audio_sink_owner.h"
#include "content/renderer/media/webaudio_capturer_source.h"
+#include "content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h"
#include "content/renderer/media/webrtc_audio_capturer.h"
-#include "content/renderer/media/webrtc_local_audio_source_provider.h"
-#include "media/base/audio_fifo.h"
-#include "third_party/libjingle/source/talk/media/base/audiorenderer.h"
namespace content {
-static const size_t kMaxNumberOfBuffersInFifo = 2;
-static const char kAudioTrackKind[] = "audio";
-
-namespace {
-
-using webrtc::MediaConstraintsInterface;
-
-// This helper function checks if any audio constraints are set that require
-// audio processing to be applied. Right now this is a big, single switch for
-// all of the properties, but in the future they'll be handled one by one.
-bool NeedsAudioProcessing(
- const webrtc::MediaConstraintsInterface* constraints) {
- if (!constraints)
- return false;
-
- static const char* kAudioProcessingProperties[] = {
- MediaConstraintsInterface::kEchoCancellation,
- MediaConstraintsInterface::kExperimentalEchoCancellation,
- MediaConstraintsInterface::kAutoGainControl,
- MediaConstraintsInterface::kExperimentalAutoGainControl,
- MediaConstraintsInterface::kNoiseSuppression,
- MediaConstraintsInterface::kHighpassFilter,
- MediaConstraintsInterface::kTypingNoiseDetection,
- };
-
- for (size_t i = 0; i < arraysize(kAudioProcessingProperties); ++i) {
- bool value = false;
- if (webrtc::FindConstraint(constraints, kAudioProcessingProperties[i],
- &value, NULL) &&
- value) {
- return true;
- }
- }
-
- return false;
-}
-
-} // namespace.
-
-// This is a temporary audio buffer with parameters used to send data to
-// callbacks.
-class WebRtcLocalAudioTrack::ConfiguredBuffer {
- public:
- ConfiguredBuffer() {}
- virtual ~ConfiguredBuffer() {}
-
- void Configure(const media::AudioParameters& params) {
- DCHECK(params.IsValid());
-
- // PeerConnection uses 10ms as the sink buffer size as its native packet
- // size. We use the native PeerConnection buffer size to achieve the best
- // performance when a PeerConnection is connected with a track.
- int sink_buffer_size = params.sample_rate() / 100;
- if (params.frames_per_buffer() < sink_buffer_size) {
- // When the source is running with a buffer size smaller than the peer
- // connection buffer size, that means no PeerConnection is connected
- // to the track, use the same buffer size as the incoming format to
- // avoid extra FIFO for WebAudio.
- sink_buffer_size = params.frames_per_buffer();
- }
- params_.Reset(params.format(), params.channel_layout(), params.channels(),
- params.input_channels(), params.sample_rate(),
- params.bits_per_sample(), sink_buffer_size);
-
- audio_wrapper_ = media::AudioBus::Create(params_.channels(),
- params_.frames_per_buffer());
- buffer_.reset(new int16[params_.frames_per_buffer() * params_.channels()]);
-
- // The size of the FIFO should be at least twice of the source buffer size
- // or twice of the sink buffer size.
- int buffer_size = std::max(
- kMaxNumberOfBuffersInFifo * params.frames_per_buffer(),
- kMaxNumberOfBuffersInFifo * params_.frames_per_buffer());
- fifo_.reset(new media::AudioFifo(params_.channels(), buffer_size));
- }
-
- void Push(media::AudioBus* audio_source) {
- DCHECK(fifo_->frames() + audio_source->frames() <= fifo_->max_frames());
- fifo_->Push(audio_source);
- }
-
- bool Consume() {
- if (fifo_->frames() < audio_wrapper_->frames())
- return false;
-
- fifo_->Consume(audio_wrapper_.get(), 0, audio_wrapper_->frames());
- audio_wrapper_->ToInterleaved(audio_wrapper_->frames(),
- params_.bits_per_sample() / 8,
- buffer());
- return true;
- }
-
- int16* buffer() const { return buffer_.get(); }
-
- // Format of the output audio buffer.
- const media::AudioParameters& params() const { return params_; }
-
- private:
- media::AudioParameters params_;
- scoped_ptr<media::AudioBus> audio_wrapper_;
- scoped_ptr<media::AudioFifo> fifo_;
- scoped_ptr<int16[]> buffer_;
-};
-
-scoped_refptr<WebRtcLocalAudioTrack> WebRtcLocalAudioTrack::Create(
- const std::string& id,
- const scoped_refptr<WebRtcAudioCapturer>& capturer,
- WebAudioCapturerSource* webaudio_source,
- webrtc::AudioSourceInterface* track_source,
- const webrtc::MediaConstraintsInterface* constraints) {
- talk_base::RefCountedObject<WebRtcLocalAudioTrack>* track =
- new talk_base::RefCountedObject<WebRtcLocalAudioTrack>(
- id, capturer, webaudio_source, track_source, constraints);
- return track;
-}
-
WebRtcLocalAudioTrack::WebRtcLocalAudioTrack(
- const std::string& label,
+ WebRtcLocalAudioTrackAdapter* adapter,
const scoped_refptr<WebRtcAudioCapturer>& capturer,
- WebAudioCapturerSource* webaudio_source,
- webrtc::AudioSourceInterface* track_source,
- const webrtc::MediaConstraintsInterface* constraints)
- : webrtc::MediaStreamTrack<webrtc::AudioTrackInterface>(label),
+ WebAudioCapturerSource* webaudio_source)
+ : MediaStreamTrack(adapter, true),
+ adapter_(adapter),
capturer_(capturer),
- webaudio_source_(webaudio_source),
- track_source_(track_source),
- need_audio_processing_(NeedsAudioProcessing(constraints)),
- buffer_(new ConfiguredBuffer()) {
+ webaudio_source_(webaudio_source) {
DCHECK(capturer.get() || webaudio_source);
- if (!webaudio_source_) {
- source_provider_.reset(new WebRtcLocalAudioSourceProvider());
- AddSink(source_provider_.get());
- }
+
+ adapter_->Initialize(this);
+
DVLOG(1) << "WebRtcLocalAudioTrack::WebRtcLocalAudioTrack()";
}
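
Editorial note on the hunk above: the removed ConfiguredBuffer repacketized whatever buffer size the source delivered into PeerConnection's native 10 ms packets via an AudioFifo; the new adapter-based track no longer needs this because it receives data already in the sink format. A minimal standalone sketch of the old sizing rule, for illustration only (the helper name below is made up, not code from this patch):

  // Mirror of the choice ConfiguredBuffer::Configure() made: PeerConnection
  // consumes audio in 10 ms packets, so default to sample_rate / 100 frames.
  // If the source already delivers smaller buffers (e.g. WebAudio with no
  // PeerConnection attached), keep the source size to avoid an extra FIFO.
  int ChooseSinkBufferSize(int sample_rate, int source_frames_per_buffer) {
    int sink_frames = sample_rate / 100;  // 10 ms worth of frames.
    if (source_frames_per_buffer < sink_frames)
      sink_frames = source_frames_per_buffer;
    return sink_frames;
  }
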
@@ -159,69 +38,58 @@ WebRtcLocalAudioTrack::~WebRtcLocalAudioTrack() {
Stop();
}
-void WebRtcLocalAudioTrack::Capture(media::AudioBus* audio_source,
- int audio_delay_milliseconds,
+void WebRtcLocalAudioTrack::Capture(const int16* audio_data,
+ base::TimeDelta delay,
int volume,
- bool key_pressed) {
+ bool key_pressed,
+ bool need_audio_processing) {
DCHECK(capture_thread_checker_.CalledOnValidThread());
+
+ // Calculate the signal level regardless if the track is disabled or enabled.
+ int signal_level = level_calculator_->Calculate(
+ audio_data, audio_parameters_.channels(),
+ audio_parameters_.frames_per_buffer());
+ adapter_->SetSignalLevel(signal_level);
+
scoped_refptr<WebRtcAudioCapturer> capturer;
- std::vector<int> voe_channels;
SinkList::ItemList sinks;
SinkList::ItemList sinks_to_notify_format;
- bool is_webaudio_source = false;
{
base::AutoLock auto_lock(lock_);
capturer = capturer_;
- voe_channels = voe_channels_;
sinks = sinks_.Items();
sinks_.RetrieveAndClearTags(&sinks_to_notify_format);
- is_webaudio_source = (webaudio_source_.get() != NULL);
}
// Notify the tracks on when the format changes. This will do nothing if
// |sinks_to_notify_format| is empty.
for (SinkList::ItemList::const_iterator it = sinks_to_notify_format.begin();
it != sinks_to_notify_format.end(); ++it) {
- (*it)->OnSetFormat(buffer_->params());
+ (*it)->OnSetFormat(audio_parameters_);
}
- // Push the data to the fifo.
- buffer_->Push(audio_source);
-
- // When the source is WebAudio, turn off the audio processing if the delay
- // value is 0 even though the constraint is set to true. In such case, it
- // indicates the data is not from microphone.
- // TODO(xians): remove the flag when supporting one APM per audio track.
- // See crbug/264611 for details.
- bool need_audio_processing = need_audio_processing_;
- if (is_webaudio_source && need_audio_processing)
- need_audio_processing = (audio_delay_milliseconds != 0);
-
- int current_volume = volume;
- while (buffer_->Consume()) {
- // Feed the data to the sinks.
- // TODO (jiayl): we should not pass the real audio data down if the track is
- // disabled. This is currently done so to feed input to WebRTC typing
- // detection and should be changed when audio processing is moved from
- // WebRTC to the track.
- for (SinkList::ItemList::const_iterator it = sinks.begin();
- it != sinks.end();
- ++it) {
- int new_volume = (*it)->OnData(buffer_->buffer(),
- buffer_->params().sample_rate(),
- buffer_->params().channels(),
- buffer_->params().frames_per_buffer(),
- voe_channels,
- audio_delay_milliseconds,
- current_volume,
- need_audio_processing,
- key_pressed);
- if (new_volume != 0 && capturer.get()) {
- // Feed the new volume to WebRtc while changing the volume on the
- // browser.
- capturer->SetVolume(new_volume);
- current_volume = new_volume;
- }
+ // Feed the data to the sinks.
+ // TODO(jiayl): we should not pass the real audio data down if the track is
+ // disabled. This is currently done so to feed input to WebRTC typing
+ // detection and should be changed when audio processing is moved from
+ // WebRTC to the track.
+ std::vector<int> voe_channels = adapter_->VoeChannels();
+ for (SinkList::ItemList::const_iterator it = sinks.begin();
+ it != sinks.end();
+ ++it) {
+ int new_volume = (*it)->OnData(audio_data,
+ audio_parameters_.sample_rate(),
+ audio_parameters_.channels(),
+ audio_parameters_.frames_per_buffer(),
+ voe_channels,
+ delay.InMilliseconds(),
+ volume,
+ need_audio_processing,
+ key_pressed);
+ if (new_volume != 0 && capturer.get() && !webaudio_source_) {
+ // Feed the new volume to WebRtc while changing the volume on the
+ // browser.
+ capturer->SetVolume(new_volume);
}
}
}
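
Editorial note on the hunk above: the rewritten Capture() now derives a signal level from the raw int16 data through MediaStreamAudioLevelCalculator and hands it to the adapter before fanning the buffer out to the sinks. A rough sketch of what such a level calculation can look like (an assumption about the approach, not the actual MediaStreamAudioLevelCalculator implementation):

  #include <cstdint>
  #include <cstdlib>

  // Peak absolute amplitude over one interleaved int16 buffer; a value like
  // this is what the track would forward to the adapter via SetSignalLevel().
  int CalculatePeakLevel(const int16_t* audio_data, int channels, int frames) {
    int max_amplitude = 0;
    for (int i = 0; i < channels * frames; ++i) {
      const int amplitude = std::abs(static_cast<int>(audio_data[i]));
      if (amplitude > max_amplitude)
        max_amplitude = amplitude;
    }
    return max_amplitude;
  }
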
@@ -234,49 +102,22 @@ void WebRtcLocalAudioTrack::OnSetFormat(
capture_thread_checker_.DetachFromThread();
DCHECK(capture_thread_checker_.CalledOnValidThread());
- DCHECK(params.IsValid());
- buffer_->Configure(params);
+ audio_parameters_ = params;
+ level_calculator_.reset(new MediaStreamAudioLevelCalculator());
base::AutoLock auto_lock(lock_);
// Remember to notify all sinks of the new format.
sinks_.TagAll();
}
-void WebRtcLocalAudioTrack::AddChannel(int channel_id) {
- DVLOG(1) << "WebRtcLocalAudioTrack::AddChannel(channel_id="
- << channel_id << ")";
- base::AutoLock auto_lock(lock_);
- if (std::find(voe_channels_.begin(), voe_channels_.end(), channel_id) !=
- voe_channels_.end()) {
- // We need to handle the case when the same channel is connected to the
- // track more than once.
- return;
- }
-
- voe_channels_.push_back(channel_id);
-}
-
-void WebRtcLocalAudioTrack::RemoveChannel(int channel_id) {
- DVLOG(1) << "WebRtcLocalAudioTrack::RemoveChannel(channel_id="
- << channel_id << ")";
- base::AutoLock auto_lock(lock_);
- std::vector<int>::iterator iter =
- std::find(voe_channels_.begin(), voe_channels_.end(), channel_id);
- DCHECK(iter != voe_channels_.end());
- voe_channels_.erase(iter);
-}
-
-// webrtc::AudioTrackInterface implementation.
-webrtc::AudioSourceInterface* WebRtcLocalAudioTrack::GetSource() const {
- return track_source_;
-}
-
-cricket::AudioRenderer* WebRtcLocalAudioTrack::GetRenderer() {
- return this;
-}
-
-std::string WebRtcLocalAudioTrack::kind() const {
- return kAudioTrackKind;
+void WebRtcLocalAudioTrack::SetAudioProcessor(
+ const scoped_refptr<MediaStreamAudioProcessor>& processor) {
+  // The |processor| may have no audio processing enabled, which can happen
+  // if kDisableAudioTrackProcessing is set or all the audio processing
+  // constraints in the |processor| are turned off. In that case, pass NULL
+  // to the adapter to indicate that no stats can be obtained from the
+  // processor.
+ adapter_->SetAudioProcessor(processor->has_audio_processing() ?
+ processor : NULL);
}
void WebRtcLocalAudioTrack::AddSink(MediaStreamAudioSink* sink) {
@@ -354,11 +195,9 @@ void WebRtcLocalAudioTrack::Start() {
// capturer as its sink otherwise two streams in different clock will be
// pushed through the same track.
webaudio_source_->Start(this, capturer_.get());
- return;
- }
-
- if (capturer_.get())
+ } else if (capturer_.get()) {
capturer_->AddTrack(this);
+ }
SinkList::ItemList sinks;
{