path: root/chromium/media/audio
author     Jocelyn Turcotte <jocelyn.turcotte@digia.com>  2014-08-08 14:30:41 +0200
committer  Jocelyn Turcotte <jocelyn.turcotte@digia.com>  2014-08-12 13:49:54 +0200
commit     ab0a50979b9eb4dfa3320eff7e187e41efedf7a9 (patch)
tree       498dfb8a97ff3361a9f7486863a52bb4e26bb898 /chromium/media/audio
parent     4ce69f7403811819800e7c5ae1318b2647e778d1 (diff)
Update Chromium to beta version 37.0.2062.68
Change-Id: I188e3b5aff1bec75566014291b654eb19f5bc8ca
Reviewed-by: Andras Becsi <andras.becsi@digia.com>
Diffstat (limited to 'chromium/media/audio')
-rw-r--r--  chromium/media/audio/agc_audio_stream.h | 4
-rw-r--r--  chromium/media/audio/alsa/alsa_input.cc | 20
-rw-r--r--  chromium/media/audio/alsa/alsa_input.h | 5
-rw-r--r--  chromium/media/audio/alsa/alsa_output.cc | 11
-rw-r--r--  chromium/media/audio/alsa/alsa_output.h | 9
-rw-r--r--  chromium/media/audio/alsa/alsa_output_unittest.cc | 21
-rw-r--r--  chromium/media/audio/alsa/audio_manager_alsa.cc | 16
-rw-r--r--  chromium/media/audio/alsa/audio_manager_alsa.h | 3
-rw-r--r--  chromium/media/audio/android/audio_android_unittest.cc | 533
-rw-r--r--  chromium/media/audio/android/audio_manager_android.cc | 209
-rw-r--r--  chromium/media/audio/android/audio_manager_android.h | 28
-rw-r--r--  chromium/media/audio/android/audio_record_input.cc | 18
-rw-r--r--  chromium/media/audio/android/audio_record_input.h | 4
-rw-r--r--  chromium/media/audio/android/opensles_input.cc | 24
-rw-r--r--  chromium/media/audio/android/opensles_input.h | 3
-rw-r--r--  chromium/media/audio/android/opensles_output.cc | 11
-rw-r--r--  chromium/media/audio/android/opensles_output.h | 7
-rw-r--r--  chromium/media/audio/audio_input_controller.cc | 260
-rw-r--r--  chromium/media/audio/audio_input_controller.h | 99
-rw-r--r--  chromium/media/audio/audio_input_controller_unittest.cc | 33
-rw-r--r--  chromium/media/audio/audio_input_device.cc | 61
-rw-r--r--  chromium/media/audio/audio_input_device.h | 9
-rw-r--r--  chromium/media/audio/audio_input_ipc.h | 3
-rw-r--r--  chromium/media/audio/audio_input_unittest.cc | 252
-rw-r--r--  chromium/media/audio/audio_io.h | 23
-rw-r--r--  chromium/media/audio/audio_logging.h | 8
-rw-r--r--  chromium/media/audio/audio_low_latency_input_output_unittest.cc | 31
-rw-r--r--  chromium/media/audio/audio_manager.h | 38
-rw-r--r--  chromium/media/audio/audio_manager_base.cc | 112
-rw-r--r--  chromium/media/audio/audio_manager_base.h | 31
-rw-r--r--  chromium/media/audio/audio_manager_unittest.cc | 45
-rw-r--r--  chromium/media/audio/audio_output_controller.cc | 94
-rw-r--r--  chromium/media/audio/audio_output_controller.h | 53
-rw-r--r--  chromium/media/audio/audio_output_controller_unittest.cc | 20
-rw-r--r--  chromium/media/audio/audio_output_device.cc | 64
-rw-r--r--  chromium/media/audio/audio_output_device.h | 15
-rw-r--r--  chromium/media/audio/audio_output_device_unittest.cc | 41
-rw-r--r--  chromium/media/audio/audio_output_dispatcher.cc | 16
-rw-r--r--  chromium/media/audio/audio_output_dispatcher.h | 20
-rw-r--r--  chromium/media/audio/audio_output_dispatcher_impl.cc | 45
-rw-r--r--  chromium/media/audio/audio_output_dispatcher_impl.h | 4
-rw-r--r--  chromium/media/audio/audio_output_ipc.h | 3
-rw-r--r--  chromium/media/audio/audio_output_proxy_unittest.cc | 136
-rw-r--r--  chromium/media/audio/audio_output_resampler.cc | 106
-rw-r--r--  chromium/media/audio/audio_output_resampler.h | 3
-rw-r--r--  chromium/media/audio/audio_parameters.cc | 2
-rw-r--r--  chromium/media/audio/audio_parameters.h | 4
-rw-r--r--  chromium/media/audio/audio_parameters_unittest.cc | 38
-rw-r--r--  chromium/media/audio/clockless_audio_sink.cc | 16
-rw-r--r--  chromium/media/audio/clockless_audio_sink.h | 2
-rw-r--r--  chromium/media/audio/cras/audio_manager_cras.cc | 47
-rw-r--r--  chromium/media/audio/cras/audio_manager_cras.h | 7
-rw-r--r--  chromium/media/audio/cras/cras_input.cc | 23
-rw-r--r--  chromium/media/audio/cras/cras_input.h | 8
-rw-r--r--  chromium/media/audio/cras/cras_input_unittest.cc | 29
-rw-r--r--  chromium/media/audio/cras/cras_unified.cc | 11
-rw-r--r--  chromium/media/audio/cras/cras_unified.h | 10
-rw-r--r--  chromium/media/audio/cras/cras_unified_unittest.cc | 29
-rw-r--r--  chromium/media/audio/fake_audio_consumer.cc | 29
-rw-r--r--  chromium/media/audio/fake_audio_consumer.h | 13
-rw-r--r--  chromium/media/audio/fake_audio_input_stream.cc | 72
-rw-r--r--  chromium/media/audio/fake_audio_input_stream.h | 3
-rw-r--r--  chromium/media/audio/fake_audio_log_factory.cc | 3
-rw-r--r--  chromium/media/audio/fake_audio_manager.cc | 3
-rw-r--r--  chromium/media/audio/fake_audio_manager.h | 3
-rw-r--r--  chromium/media/audio/fake_audio_output_stream.cc | 14
-rw-r--r--  chromium/media/audio/linux/audio_manager_linux.cc | 8
-rw-r--r--  chromium/media/audio/mac/aggregate_device_manager.cc | 371
-rw-r--r--  chromium/media/audio/mac/aggregate_device_manager.h | 58
-rw-r--r--  chromium/media/audio/mac/audio_auhal_mac.cc | 215
-rw-r--r--  chromium/media/audio/mac/audio_auhal_mac.h | 21
-rw-r--r--  chromium/media/audio/mac/audio_auhal_mac_unittest.cc | 231
-rw-r--r--  chromium/media/audio/mac/audio_device_listener_mac.cc | 1
-rw-r--r--  chromium/media/audio/mac/audio_device_listener_mac_unittest.cc | 13
-rw-r--r--  chromium/media/audio/mac/audio_input_mac.cc | 44
-rw-r--r--  chromium/media/audio/mac/audio_input_mac.h | 12
-rw-r--r--  chromium/media/audio/mac/audio_low_latency_input_mac.cc | 88
-rw-r--r--  chromium/media/audio/mac/audio_low_latency_input_mac.h | 10
-rw-r--r--  chromium/media/audio/mac/audio_low_latency_input_mac_unittest.cc | 83
-rw-r--r--  chromium/media/audio/mac/audio_low_latency_output_mac.cc | 416
-rw-r--r--  chromium/media/audio/mac/audio_low_latency_output_mac.h | 115
-rw-r--r--  chromium/media/audio/mac/audio_manager_mac.cc | 402
-rw-r--r--  chromium/media/audio/mac/audio_manager_mac.h | 39
-rw-r--r--  chromium/media/audio/mac/audio_synchronized_mac.cc | 976
-rw-r--r--  chromium/media/audio/mac/audio_synchronized_mac.h | 216
-rw-r--r--  chromium/media/audio/mac/audio_unified_mac.cc | 397
-rw-r--r--  chromium/media/audio/mac/audio_unified_mac.h | 100
-rw-r--r--  chromium/media/audio/mock_audio_manager.cc | 32
-rw-r--r--  chromium/media/audio/mock_audio_manager.h | 18
-rw-r--r--  chromium/media/audio/mock_audio_source_callback.cc | 12
-rw-r--r--  chromium/media/audio/mock_audio_source_callback.h | 28
-rw-r--r--  chromium/media/audio/null_audio_sink.cc | 18
-rw-r--r--  chromium/media/audio/null_audio_sink.h | 6
-rw-r--r--  chromium/media/audio/openbsd/audio_manager_openbsd.cc | 9
-rw-r--r--  chromium/media/audio/openbsd/audio_manager_openbsd.h | 3
-rw-r--r--  chromium/media/audio/pulse/audio_manager_pulse.cc | 53
-rw-r--r--  chromium/media/audio/pulse/audio_manager_pulse.h | 5
-rw-r--r--  chromium/media/audio/pulse/pulse.sigs | 12
-rw-r--r--  chromium/media/audio/pulse/pulse_input.cc | 13
-rw-r--r--  chromium/media/audio/pulse/pulse_input.h | 2
-rw-r--r--  chromium/media/audio/pulse/pulse_output.cc | 20
-rw-r--r--  chromium/media/audio/pulse/pulse_output.h | 9
-rw-r--r--  chromium/media/audio/pulse/pulse_unified.cc | 292
-rw-r--r--  chromium/media/audio/pulse/pulse_unified.h | 90
-rw-r--r--  chromium/media/audio/pulse/pulse_util.cc | 13
-rw-r--r--  chromium/media/audio/pulse/pulse_util.h | 1
-rw-r--r--  chromium/media/audio/sample_rates.cc | 51
-rw-r--r--  chromium/media/audio/sample_rates.h | 7
-rw-r--r--  chromium/media/audio/scoped_loop_observer.h | 50
-rw-r--r--  chromium/media/audio/scoped_task_runner_observer.cc (renamed from chromium/media/audio/scoped_loop_observer.cc) | 21
-rw-r--r--  chromium/media/audio/scoped_task_runner_observer.h | 52
-rw-r--r--  chromium/media/audio/simple_sources.cc | 6
-rw-r--r--  chromium/media/audio/simple_sources.h | 3
-rw-r--r--  chromium/media/audio/sounds/audio_stream_handler.cc | 127
-rw-r--r--  chromium/media/audio/sounds/audio_stream_handler.h | 10
-rw-r--r--  chromium/media/audio/sounds/audio_stream_handler_unittest.cc | 8
-rw-r--r--  chromium/media/audio/sounds/sounds_manager.cc | 71
-rw-r--r--  chromium/media/audio/sounds/sounds_manager.h | 4
-rw-r--r--  chromium/media/audio/sounds/sounds_manager_unittest.cc | 2
-rw-r--r--  chromium/media/audio/sounds/wav_audio_handler.cc | 34
-rw-r--r--  chromium/media/audio/sounds/wav_audio_handler.h | 14
-rw-r--r--  chromium/media/audio/sounds/wav_audio_handler_unittest.cc | 20
-rw-r--r--  chromium/media/audio/test_audio_input_controller_factory.cc | 4
-rw-r--r--  chromium/media/audio/virtual_audio_input_stream.cc | 28
-rw-r--r--  chromium/media/audio/virtual_audio_input_stream.h | 12
-rw-r--r--  chromium/media/audio/virtual_audio_input_stream_unittest.cc | 52
-rw-r--r--  chromium/media/audio/virtual_audio_output_stream_unittest.cc | 30
-rw-r--r--  chromium/media/audio/win/audio_device_listener_win.cc | 69
-rw-r--r--  chromium/media/audio/win/audio_device_listener_win.h | 3
-rw-r--r--  chromium/media/audio/win/audio_device_listener_win_unittest.cc | 2
-rw-r--r--  chromium/media/audio/win/audio_low_latency_input_win.cc | 150
-rw-r--r--  chromium/media/audio/win/audio_low_latency_input_win.h | 27
-rw-r--r--  chromium/media/audio/win/audio_low_latency_input_win_unittest.cc | 149
-rw-r--r--  chromium/media/audio/win/audio_low_latency_output_win.cc | 60
-rw-r--r--  chromium/media/audio/win/audio_low_latency_output_win.h | 5
-rw-r--r--  chromium/media/audio/win/audio_low_latency_output_win_unittest.cc | 41
-rw-r--r--  chromium/media/audio/win/audio_manager_win.cc | 127
-rw-r--r--  chromium/media/audio/win/audio_manager_win.h | 15
-rw-r--r--  chromium/media/audio/win/audio_output_win_unittest.cc | 79
-rw-r--r--  chromium/media/audio/win/audio_unified_win.cc | 984
-rw-r--r--  chromium/media/audio/win/audio_unified_win.h | 352
-rw-r--r--  chromium/media/audio/win/audio_unified_win_unittest.cc | 366
-rw-r--r--  chromium/media/audio/win/core_audio_util_win.cc | 47
-rw-r--r--  chromium/media/audio/win/core_audio_util_win.h | 4
-rw-r--r--  chromium/media/audio/win/core_audio_util_win_unittest.cc | 2
-rw-r--r--  chromium/media/audio/win/device_enumeration_win.cc | 11
-rw-r--r--  chromium/media/audio/win/wavein_input_win.cc | 21
-rw-r--r--  chromium/media/audio/win/wavein_input_win.h | 5
148 files changed, 2987 insertions, 7507 deletions
diff --git a/chromium/media/audio/agc_audio_stream.h b/chromium/media/audio/agc_audio_stream.h
index b289a0b15e9..940d96412c2 100644
--- a/chromium/media/audio/agc_audio_stream.h
+++ b/chromium/media/audio/agc_audio_stream.h
@@ -73,12 +73,10 @@ class MEDIA_EXPORT AgcAudioStream : public AudioInterface {
AgcAudioStream()
: agc_is_enabled_(false), max_volume_(0.0), normalized_volume_(0.0) {
- DVLOG(1) << __FUNCTION__;
}
virtual ~AgcAudioStream() {
DCHECK(thread_checker_.CalledOnValidThread());
- DVLOG(1) << __FUNCTION__;
}
protected:
@@ -87,7 +85,6 @@ class MEDIA_EXPORT AgcAudioStream : public AudioInterface {
// The timer is only started if AGC mode is first enabled using the
// SetAutomaticGainControl() method.
void StartAgc() {
- DVLOG(1) << "StartAgc()";
DCHECK(thread_checker_.CalledOnValidThread());
if (!agc_is_enabled_ || timer_.IsRunning())
return;
@@ -105,7 +102,6 @@ class MEDIA_EXPORT AgcAudioStream : public AudioInterface {
// Stops the periodic timer which periodically checks and updates the
// current microphone volume level.
void StopAgc() {
- DVLOG(1) << "StopAgc()";
DCHECK(thread_checker_.CalledOnValidThread());
if (timer_.IsRunning())
timer_.Stop();
diff --git a/chromium/media/audio/alsa/alsa_input.cc b/chromium/media/audio/alsa/alsa_input.cc
index 9dcbf2b8662..0bc9f314d45 100644
--- a/chromium/media/audio/alsa/alsa_input.cc
+++ b/chromium/media/audio/alsa/alsa_input.cc
@@ -32,7 +32,8 @@ AlsaPcmInputStream::AlsaPcmInputStream(AudioManagerBase* audio_manager,
device_name_(device_name),
params_(params),
bytes_per_buffer_(params.frames_per_buffer() *
- (params.channels() * params.bits_per_sample()) / 8),
+ (params.channels() * params.bits_per_sample()) /
+ 8),
wrapper_(wrapper),
buffer_duration_(base::TimeDelta::FromMicroseconds(
params.frames_per_buffer() * base::Time::kMicrosecondsPerSecond /
@@ -41,8 +42,9 @@ AlsaPcmInputStream::AlsaPcmInputStream(AudioManagerBase* audio_manager,
device_handle_(NULL),
mixer_handle_(NULL),
mixer_element_handle_(NULL),
- weak_factory_(this),
- read_callback_behind_schedule_(false) {
+ read_callback_behind_schedule_(false),
+ audio_bus_(AudioBus::Create(params)),
+ weak_factory_(this) {
}
AlsaPcmInputStream::~AlsaPcmInputStream() {}
@@ -208,8 +210,11 @@ void AlsaPcmInputStream::ReadAudio() {
int frames_read = wrapper_->PcmReadi(device_handle_, audio_buffer_.get(),
params_.frames_per_buffer());
if (frames_read == params_.frames_per_buffer()) {
- callback_->OnData(this, audio_buffer_.get(), bytes_per_buffer_,
- hardware_delay_bytes, normalized_volume);
+ audio_bus_->FromInterleaved(audio_buffer_.get(),
+ audio_bus_->frames(),
+ params_.bits_per_sample() / 8);
+ callback_->OnData(
+ this, audio_bus_.get(), hardware_delay_bytes, normalized_volume);
} else {
LOG(WARNING) << "PcmReadi returning less than expected frames: "
<< frames_read << " vs. " << params_.frames_per_buffer()
@@ -245,6 +250,8 @@ void AlsaPcmInputStream::Stop() {
int error = wrapper_->PcmDrop(device_handle_);
if (error < 0)
HandleError("PcmDrop", error);
+
+ callback_ = NULL;
}
void AlsaPcmInputStream::Close() {
@@ -261,9 +268,6 @@ void AlsaPcmInputStream::Close() {
device_handle_ = NULL;
mixer_handle_ = NULL;
mixer_element_handle_ = NULL;
-
- if (callback_)
- callback_->OnClose(this);
}
audio_manager_->ReleaseInputStream(this);
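
The hunk above moves AudioInputCallback::OnData() from a raw interleaved byte pointer plus size to an AudioBus filled via FromInterleaved(). As a rough standalone illustration of what that conversion amounts to (plain C++, not the Chromium AudioBus implementation; the 1/32768 scaling of 16-bit samples into float planes is an assumption about the bus internals):

#include <cstdint>
#include <vector>

// Split an interleaved int16 capture buffer into per-channel float planes,
// which is the shape of data the new AudioBus-based OnData() path receives
// instead of the old raw byte buffer.
static void DeinterleaveToPlanes(const int16_t* interleaved,
                                 int frames,
                                 int channels,
                                 std::vector<std::vector<float> >* planes) {
  planes->assign(channels, std::vector<float>(frames));
  for (int frame = 0; frame < frames; ++frame) {
    for (int ch = 0; ch < channels; ++ch) {
      // Assumed scaling: int16 full scale maps to [-1.0, 1.0).
      (*planes)[ch][frame] = interleaved[frame * channels + ch] / 32768.0f;
    }
  }
}

Sinks that still want interleaved bytes, such as FileAudioSink in the Android unit test further down, now perform the reverse step themselves with ToInterleaved() before appending to their byte FIFOs.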
diff --git a/chromium/media/audio/alsa/alsa_input.h b/chromium/media/audio/alsa/alsa_input.h
index 6e9aad9056e..c26f3e2b66e 100644
--- a/chromium/media/audio/alsa/alsa_input.h
+++ b/chromium/media/audio/alsa/alsa_input.h
@@ -80,9 +80,12 @@ class AlsaPcmInputStream : public AgcAudioStream<AudioInputStream> {
snd_pcm_t* device_handle_; // Handle to the ALSA PCM recording device.
snd_mixer_t* mixer_handle_; // Handle to the ALSA microphone mixer.
snd_mixer_elem_t* mixer_element_handle_; // Handle to the capture element.
- base::WeakPtrFactory<AlsaPcmInputStream> weak_factory_;
scoped_ptr<uint8[]> audio_buffer_; // Buffer used for reading audio data.
bool read_callback_behind_schedule_;
+ scoped_ptr<AudioBus> audio_bus_;
+
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<AlsaPcmInputStream> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(AlsaPcmInputStream);
};
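
This header (and alsa_output.h below) now notes that weak pointers must be invalidated before all other member variables. The mechanism behind that comment is simply the C++ rule that non-static data members are destroyed in reverse declaration order, so a factory declared last is destroyed first. A minimal standalone sketch of that rule (plain C++, no base::WeakPtrFactory involved):

#include <iostream>

struct Member {
  explicit Member(const char* name) : name_(name) {}
  ~Member() { std::cout << "destroying " << name_ << std::endl; }
  const char* name_;
};

// Destruction runs bottom-up: last_declared_ goes first, which is why the
// WeakPtrFactory is moved to the end of the member list in the diff above,
// guaranteeing weak pointers are cleared before any other member is torn down.
struct Stream {
  Member first_declared_{"first_declared_ (destroyed last)"};
  Member last_declared_{"last_declared_ (destroyed first)"};
};

int main() {
  Stream stream;
  return 0;
}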
diff --git a/chromium/media/audio/alsa/alsa_output.cc b/chromium/media/audio/alsa/alsa_output.cc
index eccf8ee28a8..690d738f195 100644
--- a/chromium/media/audio/alsa/alsa_output.cc
+++ b/chromium/media/audio/alsa/alsa_output.cc
@@ -39,7 +39,6 @@
#include "base/bind.h"
#include "base/debug/trace_event.h"
#include "base/logging.h"
-#include "base/message_loop/message_loop.h"
#include "base/stl_util.h"
#include "base/time/time.h"
#include "media/audio/alsa/alsa_util.h"
@@ -154,12 +153,12 @@ AlsaPcmOutputStream::AlsaPcmOutputStream(const std::string& device_name,
message_loop_(base::MessageLoop::current()),
playback_handle_(NULL),
frames_per_packet_(packet_size_ / bytes_per_frame_),
- weak_factory_(this),
state_(kCreated),
volume_(1.0f),
source_callback_(NULL),
- audio_bus_(AudioBus::Create(params)) {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
+ audio_bus_(AudioBus::Create(params)),
+ weak_factory_(this) {
+ DCHECK(manager_->GetTaskRunner()->BelongsToCurrentThread());
DCHECK_EQ(audio_bus_->frames() * bytes_per_frame_, packet_size_);
// Sanity check input values.
@@ -536,13 +535,13 @@ std::string AlsaPcmOutputStream::FindDeviceForChannels(uint32 channels) {
for (void** hint_iter = hints; *hint_iter != NULL; hint_iter++) {
// Only examine devices that are output capable.. Valid values are
// "Input", "Output", and NULL which means both input and output.
- scoped_ptr_malloc<char> io(
+ scoped_ptr<char, base::FreeDeleter> io(
wrapper_->DeviceNameGetHint(*hint_iter, kIoHintName));
if (io != NULL && strcmp(io.get(), "Input") == 0)
continue;
// Attempt to select the closest device for number of channels.
- scoped_ptr_malloc<char> name(
+ scoped_ptr<char, base::FreeDeleter> name(
wrapper_->DeviceNameGetHint(*hint_iter, kNameHintName));
if (strncmp(wanted_device, name.get(), strlen(wanted_device)) == 0) {
guessed_device = name.get();
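
The scoped_ptr_malloc<char> to scoped_ptr<char, base::FreeDeleter> change in this file (and in audio_manager_alsa.cc further down) is about matching the allocator: the ALSA hint strings are allocated with malloc(), so the smart pointer has to release them with free() rather than delete. A standalone sketch of the same ownership pattern using std::unique_ptr, with a malloc'd string standing in for the wrapper's DeviceNameGetHint() result:

#include <cstdlib>
#include <cstring>
#include <memory>

// Deleter that releases malloc()-allocated memory with free(), the role
// played by base::FreeDeleter in the diff.
struct MallocDeleter {
  void operator()(void* ptr) const { std::free(ptr); }
};

int main() {
  // Stand-in for the ALSA hint API, which hands back malloc()-owned strings.
  char* raw = static_cast<char*>(std::malloc(7));
  std::strcpy(raw, "Output");
  std::unique_ptr<char, MallocDeleter> io(raw);

  // Same filtering logic as the loop above: skip input-only devices.
  const bool is_input_only = io && std::strcmp(io.get(), "Input") == 0;
  return is_input_only ? 1 : 0;
}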
diff --git a/chromium/media/audio/alsa/alsa_output.h b/chromium/media/audio/alsa/alsa_output.h
index 65a23f75124..180564584c2 100644
--- a/chromium/media/audio/alsa/alsa_output.h
+++ b/chromium/media/audio/alsa/alsa_output.h
@@ -201,10 +201,6 @@ class MEDIA_EXPORT AlsaPcmOutputStream : public AudioOutputStream {
scoped_ptr<media::SeekableBuffer> buffer_;
uint32 frames_per_packet_;
- // Allows us to run tasks on the AlsaPcmOutputStream instance which are
- // bound by its lifetime.
- base::WeakPtrFactory<AlsaPcmOutputStream> weak_factory_;
-
InternalState state_;
float volume_; // Volume level from 0.0 to 1.0.
@@ -217,6 +213,11 @@ class MEDIA_EXPORT AlsaPcmOutputStream : public AudioOutputStream {
scoped_ptr<ChannelMixer> channel_mixer_;
scoped_ptr<AudioBus> mixed_audio_bus_;
+ // Allows us to run tasks on the AlsaPcmOutputStream instance which are
+ // bound by its lifetime.
+ // NOTE: Weak pointers must be invalidated before all other member variables.
+ base::WeakPtrFactory<AlsaPcmOutputStream> weak_factory_;
+
DISALLOW_COPY_AND_ASSIGN(AlsaPcmOutputStream);
};
diff --git a/chromium/media/audio/alsa/alsa_output_unittest.cc b/chromium/media/audio/alsa/alsa_output_unittest.cc
index 99ae8b02e0a..8b0aeaea4c6 100644
--- a/chromium/media/audio/alsa/alsa_output_unittest.cc
+++ b/chromium/media/audio/alsa/alsa_output_unittest.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "base/message_loop/message_loop.h"
#include "base/strings/stringprintf.h"
#include "media/audio/alsa/alsa_output.h"
#include "media/audio/alsa/alsa_wrapper.h"
#include "media/audio/alsa/audio_manager_alsa.h"
#include "media/audio/fake_audio_log_factory.h"
+#include "media/audio/mock_audio_source_callback.h"
#include "media/base/data_buffer.h"
#include "media/base/seekable_buffer.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -67,16 +67,6 @@ class MockAlsaWrapper : public AlsaWrapper {
MOCK_METHOD1(StrError, const char*(int errnum));
};
-class MockAudioSourceCallback : public AudioOutputStream::AudioSourceCallback {
- public:
- MOCK_METHOD2(OnMoreData, int(AudioBus* audio_bus,
- AudioBuffersState buffers_state));
- MOCK_METHOD3(OnMoreIOData, int(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state));
- MOCK_METHOD1(OnError, void(AudioOutputStream* stream));
-};
-
class MockAudioManagerAlsa : public AudioManagerAlsa {
public:
MockAudioManagerAlsa() : AudioManagerAlsa(&fake_audio_log_factory_) {}
@@ -85,10 +75,9 @@ class MockAudioManagerAlsa : public AudioManagerAlsa {
MOCK_METHOD0(HasAudioInputDevices, bool());
MOCK_METHOD1(MakeLinearOutputStream, AudioOutputStream*(
const AudioParameters& params));
- MOCK_METHOD3(MakeLowLatencyOutputStream, AudioOutputStream*(
+ MOCK_METHOD2(MakeLowLatencyOutputStream, AudioOutputStream*(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id));
+ const std::string& device_id));
MOCK_METHOD2(MakeLowLatencyInputStream, AudioInputStream*(
const AudioParameters& params, const std::string& device_id));
@@ -102,8 +91,8 @@ class MockAudioManagerAlsa : public AudioManagerAlsa {
}
// We don't mock this method since all tests will do the same thing
- // and use the current message loop.
- virtual scoped_refptr<base::MessageLoopProxy> GetMessageLoop() OVERRIDE {
+ // and use the current task runner.
+ virtual scoped_refptr<base::SingleThreadTaskRunner> GetTaskRunner() OVERRIDE {
return base::MessageLoop::current()->message_loop_proxy();
}
diff --git a/chromium/media/audio/alsa/audio_manager_alsa.cc b/chromium/media/audio/alsa/audio_manager_alsa.cc
index ac61a5fa974..beb60bad88b 100644
--- a/chromium/media/audio/alsa/audio_manager_alsa.cc
+++ b/chromium/media/audio/alsa/audio_manager_alsa.cc
@@ -152,8 +152,8 @@ void AudioManagerAlsa::GetAlsaDevicesInfo(
for (void** hint_iter = hints; *hint_iter != NULL; hint_iter++) {
// Only examine devices of the right type. Valid values are
// "Input", "Output", and NULL which means both input and output.
- scoped_ptr_malloc<char> io(wrapper_->DeviceNameGetHint(*hint_iter,
- kIoHintName));
+ scoped_ptr<char, base::FreeDeleter> io(wrapper_->DeviceNameGetHint(
+ *hint_iter, kIoHintName));
if (io != NULL && strcmp(unwanted_device_type, io.get()) == 0)
continue;
@@ -169,13 +169,13 @@ void AudioManagerAlsa::GetAlsaDevicesInfo(
}
// Get the unique device name for the device.
- scoped_ptr_malloc<char> unique_device_name(
+ scoped_ptr<char, base::FreeDeleter> unique_device_name(
wrapper_->DeviceNameGetHint(*hint_iter, kNameHintName));
// Find out if the device is available.
if (IsAlsaDeviceAvailable(type, unique_device_name.get())) {
// Get the description for the device.
- scoped_ptr_malloc<char> desc(wrapper_->DeviceNameGetHint(
+ scoped_ptr<char, base::FreeDeleter> desc(wrapper_->DeviceNameGetHint(
*hint_iter, kDescriptionHintName));
media::AudioDeviceName name;
@@ -252,8 +252,8 @@ bool AudioManagerAlsa::HasAnyAlsaAudioDevice(
for (void** hint_iter = hints; *hint_iter != NULL; hint_iter++) {
// Only examine devices that are |stream| capable. Valid values are
// "Input", "Output", and NULL which means both input and output.
- scoped_ptr_malloc<char> io(wrapper_->DeviceNameGetHint(*hint_iter,
- kIoHintName));
+ scoped_ptr<char, base::FreeDeleter> io(wrapper_->DeviceNameGetHint(
+ *hint_iter, kIoHintName));
const char* unwanted_type = UnwantedDeviceTypeWhenEnumerating(stream);
if (io != NULL && strcmp(unwanted_type, io.get()) == 0)
continue; // Wrong type, skip the device.
@@ -283,11 +283,9 @@ AudioOutputStream* AudioManagerAlsa::MakeLinearOutputStream(
AudioOutputStream* AudioManagerAlsa::MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) {
+ const std::string& device_id) {
DLOG_IF(ERROR, !device_id.empty()) << "Not implemented!";
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
- // TODO(xians): Use input_device_id for unified IO.
return MakeOutputStream(params);
}
diff --git a/chromium/media/audio/alsa/audio_manager_alsa.h b/chromium/media/audio/alsa/audio_manager_alsa.h
index 155089f06bc..d08c3ba1f27 100644
--- a/chromium/media/audio/alsa/audio_manager_alsa.h
+++ b/chromium/media/audio/alsa/audio_manager_alsa.h
@@ -37,8 +37,7 @@ class MEDIA_EXPORT AudioManagerAlsa : public AudioManagerBase {
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) OVERRIDE;
+ const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLowLatencyInputStream(
diff --git a/chromium/media/audio/android/audio_android_unittest.cc b/chromium/media/audio/android/audio_android_unittest.cc
index e7913265269..a356d9c25de 100644
--- a/chromium/media/audio/android/audio_android_unittest.cc
+++ b/chromium/media/audio/android/audio_android_unittest.cc
@@ -8,6 +8,7 @@
#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_loop.h"
#include "base/path_service.h"
+#include "base/run_loop.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/lock.h"
#include "base/synchronization/waitable_event.h"
@@ -17,6 +18,7 @@
#include "media/audio/android/audio_manager_android.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_manager_base.h"
+#include "media/audio/mock_audio_source_callback.h"
#include "media/base/decoder_buffer.h"
#include "media/base/seekable_buffer.h"
#include "media/base/test_data_util.h"
@@ -85,6 +87,47 @@ static double ExpectedTimeBetweenCallbacks(AudioParameters params) {
static_cast<double>(params.sample_rate()))).InMillisecondsF();
}
+// Helper method which verifies that the device list starts with a valid
+// default device name followed by non-default device names.
+static void CheckDeviceNames(const AudioDeviceNames& device_names) {
+ VLOG(2) << "Got " << device_names.size() << " audio devices.";
+ if (device_names.empty()) {
+ // Log a warning so we can see the status on the build bots. No need to
+ // break the test though since this does successfully test the code and
+ // some failure cases.
+ LOG(WARNING) << "No input devices detected";
+ return;
+ }
+
+ AudioDeviceNames::const_iterator it = device_names.begin();
+
+ // The first device in the list should always be the default device.
+ EXPECT_EQ(std::string(AudioManagerBase::kDefaultDeviceName),
+ it->device_name);
+ EXPECT_EQ(std::string(AudioManagerBase::kDefaultDeviceId), it->unique_id);
+ ++it;
+
+ // Other devices should have non-empty name and id and should not contain
+ // default name or id.
+ while (it != device_names.end()) {
+ EXPECT_FALSE(it->device_name.empty());
+ EXPECT_FALSE(it->unique_id.empty());
+ VLOG(2) << "Device ID(" << it->unique_id
+ << "), label: " << it->device_name;
+ EXPECT_NE(std::string(AudioManagerBase::kDefaultDeviceName),
+ it->device_name);
+ EXPECT_NE(std::string(AudioManagerBase::kDefaultDeviceId),
+ it->unique_id);
+ ++it;
+ }
+}
+
+// We clear the data bus to ensure that the test does not cause noise.
+static int RealOnMoreData(AudioBus* dest, AudioBuffersState buffers_state) {
+ dest->Zero();
+ return dest->frames();
+}
+
std::ostream& operator<<(std::ostream& os, const AudioParameters& params) {
using namespace std;
os << endl << "format: " << FormatToString(params.format()) << endl
@@ -105,34 +148,14 @@ std::ostream& operator<<(std::ostream& os, const AudioParameters& params) {
// Gmock implementation of AudioInputStream::AudioInputCallback.
class MockAudioInputCallback : public AudioInputStream::AudioInputCallback {
public:
- MOCK_METHOD5(OnData,
+ MOCK_METHOD4(OnData,
void(AudioInputStream* stream,
- const uint8* src,
- uint32 size,
+ const AudioBus* src,
uint32 hardware_delay_bytes,
double volume));
- MOCK_METHOD1(OnClose, void(AudioInputStream* stream));
MOCK_METHOD1(OnError, void(AudioInputStream* stream));
};
-// Gmock implementation of AudioOutputStream::AudioSourceCallback.
-class MockAudioOutputCallback : public AudioOutputStream::AudioSourceCallback {
- public:
- MOCK_METHOD2(OnMoreData,
- int(AudioBus* dest, AudioBuffersState buffers_state));
- MOCK_METHOD3(OnMoreIOData,
- int(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state));
- MOCK_METHOD1(OnError, void(AudioOutputStream* stream));
-
- // We clear the data bus to ensure that the test does not cause noise.
- int RealOnMoreData(AudioBus* dest, AudioBuffersState buffers_state) {
- dest->Zero();
- return dest->frames();
- }
-};
-
// Implements AudioOutputStream::AudioSourceCallback and provides audio data
// by reading from a data file.
class FileAudioSource : public AudioOutputStream::AudioSourceCallback {
@@ -183,13 +206,6 @@ class FileAudioSource : public AudioOutputStream::AudioSourceCallback {
return frames;
}
- virtual int OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state) OVERRIDE {
- NOTREACHED();
- return 0;
- }
-
virtual void OnError(AudioOutputStream* stream) OVERRIDE {}
int file_size() { return file_->data_size(); }
@@ -247,18 +263,22 @@ class FileAudioSink : public AudioInputStream::AudioInputCallback {
// AudioInputStream::AudioInputCallback implementation.
virtual void OnData(AudioInputStream* stream,
- const uint8* src,
- uint32 size,
+ const AudioBus* src,
uint32 hardware_delay_bytes,
double volume) OVERRIDE {
+ const int num_samples = src->frames() * src->channels();
+ scoped_ptr<int16> interleaved(new int16[num_samples]);
+ const int bytes_per_sample = sizeof(*interleaved);
+ src->ToInterleaved(src->frames(), bytes_per_sample, interleaved.get());
+
// Store data data in a temporary buffer to avoid making blocking
// fwrite() calls in the audio callback. The complete buffer will be
// written to file in the destructor.
- if (!buffer_->Append(src, size))
+ const int size = bytes_per_sample * num_samples;
+ if (!buffer_->Append((const uint8*)interleaved.get(), size))
event_->Signal();
}
- virtual void OnClose(AudioInputStream* stream) OVERRIDE {}
virtual void OnError(AudioInputStream* stream) OVERRIDE {}
private:
@@ -291,13 +311,19 @@ class FullDuplexAudioSinkSource
// AudioInputStream::AudioInputCallback implementation
virtual void OnData(AudioInputStream* stream,
- const uint8* src,
- uint32 size,
+ const AudioBus* src,
uint32 hardware_delay_bytes,
double volume) OVERRIDE {
const base::TimeTicks now_time = base::TimeTicks::Now();
const int diff = (now_time - previous_time_).InMilliseconds();
+ EXPECT_EQ(params_.bits_per_sample(), 16);
+ const int num_samples = src->frames() * src->channels();
+ scoped_ptr<int16> interleaved(new int16[num_samples]);
+ const int bytes_per_sample = sizeof(*interleaved);
+ src->ToInterleaved(src->frames(), bytes_per_sample, interleaved.get());
+ const int size = bytes_per_sample * num_samples;
+
base::AutoLock lock(lock_);
if (diff > 1000) {
started_ = true;
@@ -318,13 +344,12 @@ class FullDuplexAudioSinkSource
// Append new data to the FIFO and extend the size if the max capacity
// was exceeded. Flush the FIFO when extended just in case.
- if (!fifo_->Append(src, size)) {
+ if (!fifo_->Append((const uint8*)interleaved.get(), size)) {
fifo_->set_forward_capacity(2 * fifo_->forward_capacity());
fifo_->Clear();
}
}
- virtual void OnClose(AudioInputStream* stream) OVERRIDE {}
virtual void OnError(AudioInputStream* stream) OVERRIDE {}
// AudioOutputStream::AudioSourceCallback implementation
@@ -357,13 +382,6 @@ class FullDuplexAudioSinkSource
return dest->frames();
}
- virtual int OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state) OVERRIDE {
- NOTREACHED();
- return 0;
- }
-
virtual void OnError(AudioOutputStream* stream) OVERRIDE {}
private:
@@ -389,21 +407,76 @@ class FullDuplexAudioSinkSource
// Test fixture class for tests which only exercise the output path.
class AudioAndroidOutputTest : public testing::Test {
public:
- AudioAndroidOutputTest() {}
-
- protected:
- virtual void SetUp() {
- audio_manager_.reset(AudioManager::CreateForTesting());
- loop_.reset(new base::MessageLoopForUI());
+ AudioAndroidOutputTest()
+ : loop_(new base::MessageLoopForUI()),
+ audio_manager_(AudioManager::CreateForTesting()),
+ audio_output_stream_(NULL) {
}
- virtual void TearDown() {}
+ virtual ~AudioAndroidOutputTest() {
+ }
+ protected:
AudioManager* audio_manager() { return audio_manager_.get(); }
base::MessageLoopForUI* loop() { return loop_.get(); }
+ const AudioParameters& audio_output_parameters() {
+ return audio_output_parameters_;
+ }
- AudioParameters GetDefaultOutputStreamParameters() {
- return audio_manager()->GetDefaultOutputStreamParameters();
+ // Synchronously runs the provided callback/closure on the audio thread.
+ void RunOnAudioThread(const base::Closure& closure) {
+ if (!audio_manager()->GetTaskRunner()->BelongsToCurrentThread()) {
+ base::WaitableEvent event(false, false);
+ audio_manager()->GetTaskRunner()->PostTask(
+ FROM_HERE,
+ base::Bind(&AudioAndroidOutputTest::RunOnAudioThreadImpl,
+ base::Unretained(this),
+ closure,
+ &event));
+ event.Wait();
+ } else {
+ closure.Run();
+ }
+ }
+
+ void RunOnAudioThreadImpl(const base::Closure& closure,
+ base::WaitableEvent* event) {
+ DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ closure.Run();
+ event->Signal();
+ }
+
+ void GetDefaultOutputStreamParametersOnAudioThread() {
+ RunOnAudioThread(
+ base::Bind(&AudioAndroidOutputTest::GetDefaultOutputStreamParameters,
+ base::Unretained(this)));
+ }
+
+ void MakeAudioOutputStreamOnAudioThread(const AudioParameters& params) {
+ RunOnAudioThread(
+ base::Bind(&AudioAndroidOutputTest::MakeOutputStream,
+ base::Unretained(this),
+ params));
+ }
+
+ void OpenAndCloseAudioOutputStreamOnAudioThread() {
+ RunOnAudioThread(
+ base::Bind(&AudioAndroidOutputTest::OpenAndClose,
+ base::Unretained(this)));
+ }
+
+ void OpenAndStartAudioOutputStreamOnAudioThread(
+ AudioOutputStream::AudioSourceCallback* source) {
+ RunOnAudioThread(
+ base::Bind(&AudioAndroidOutputTest::OpenAndStart,
+ base::Unretained(this),
+ source));
+ }
+
+ void StopAndCloseAudioOutputStreamOnAudioThread() {
+ RunOnAudioThread(
+ base::Bind(&AudioAndroidOutputTest::StopAndClose,
+ base::Unretained(this)));
}
double AverageTimeBetweenCallbacks(int num_callbacks) const {
@@ -416,28 +489,25 @@ class AudioAndroidOutputTest : public testing::Test {
ExpectedTimeBetweenCallbacks(params);
const int num_callbacks =
(kCallbackTestTimeMs / expected_time_between_callbacks_ms);
- AudioOutputStream* stream = audio_manager()->MakeAudioOutputStream(
- params, std::string(), std::string());
- EXPECT_TRUE(stream);
+ MakeAudioOutputStreamOnAudioThread(params);
int count = 0;
- MockAudioOutputCallback source;
+ MockAudioSourceCallback source;
EXPECT_CALL(source, OnMoreData(NotNull(), _))
.Times(AtLeast(num_callbacks))
.WillRepeatedly(
DoAll(CheckCountAndPostQuitTask(&count, num_callbacks, loop()),
- Invoke(&source, &MockAudioOutputCallback::RealOnMoreData)));
- EXPECT_CALL(source, OnError(stream)).Times(0);
- EXPECT_CALL(source, OnMoreIOData(_, _, _)).Times(0);
+ Invoke(RealOnMoreData)));
+ EXPECT_CALL(source, OnError(audio_output_stream_)).Times(0);
+
+ OpenAndStartAudioOutputStreamOnAudioThread(&source);
- EXPECT_TRUE(stream->Open());
- stream->Start(&source);
start_time_ = base::TimeTicks::Now();
loop()->Run();
end_time_ = base::TimeTicks::Now();
- stream->Stop();
- stream->Close();
+
+ StopAndCloseAudioOutputStreamOnAudioThread();
double average_time_between_callbacks_ms =
AverageTimeBetweenCallbacks(num_callbacks);
@@ -448,11 +518,47 @@ class AudioAndroidOutputTest : public testing::Test {
EXPECT_GE(average_time_between_callbacks_ms,
0.70 * expected_time_between_callbacks_ms);
EXPECT_LE(average_time_between_callbacks_ms,
- 1.30 * expected_time_between_callbacks_ms);
+ 1.35 * expected_time_between_callbacks_ms);
+ }
+
+ void GetDefaultOutputStreamParameters() {
+ DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ audio_output_parameters_ =
+ audio_manager()->GetDefaultOutputStreamParameters();
+ EXPECT_TRUE(audio_output_parameters_.IsValid());
+ }
+
+ void MakeOutputStream(const AudioParameters& params) {
+ DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ audio_output_stream_ = audio_manager()->MakeAudioOutputStream(
+ params, std::string());
+ EXPECT_TRUE(audio_output_stream_);
+ }
+
+ void OpenAndClose() {
+ DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ EXPECT_TRUE(audio_output_stream_->Open());
+ audio_output_stream_->Close();
+ audio_output_stream_ = NULL;
+ }
+
+ void OpenAndStart(AudioOutputStream::AudioSourceCallback* source) {
+ DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ EXPECT_TRUE(audio_output_stream_->Open());
+ audio_output_stream_->Start(source);
+ }
+
+ void StopAndClose() {
+ DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ audio_output_stream_->Stop();
+ audio_output_stream_->Close();
+ audio_output_stream_ = NULL;
}
scoped_ptr<base::MessageLoopForUI> loop_;
scoped_ptr<AudioManager> audio_manager_;
+ AudioParameters audio_output_parameters_;
+ AudioOutputStream* audio_output_stream_;
base::TimeTicks start_time_;
base::TimeTicks end_time_;
@@ -476,53 +582,87 @@ std::vector<bool> RunAudioRecordInputPathTests() {
class AudioAndroidInputTest : public AudioAndroidOutputTest,
public testing::WithParamInterface<bool> {
public:
- AudioAndroidInputTest() {}
+ AudioAndroidInputTest() : audio_input_stream_(NULL) {}
protected:
+ const AudioParameters& audio_input_parameters() {
+ return audio_input_parameters_;
+ }
+
AudioParameters GetInputStreamParameters() {
- AudioParameters input_params = audio_manager()->GetInputStreamParameters(
- AudioManagerBase::kDefaultDeviceId);
+ GetDefaultInputStreamParametersOnAudioThread();
+
// Override the platform effects setting to use the AudioRecord or OpenSLES
// path as requested.
int effects = GetParam() ? AudioParameters::ECHO_CANCELLER :
AudioParameters::NO_EFFECTS;
- AudioParameters params(input_params.format(),
- input_params.channel_layout(),
- input_params.input_channels(),
- input_params.sample_rate(),
- input_params.bits_per_sample(),
- input_params.frames_per_buffer(),
+ AudioParameters params(audio_input_parameters().format(),
+ audio_input_parameters().channel_layout(),
+ audio_input_parameters().input_channels(),
+ audio_input_parameters().sample_rate(),
+ audio_input_parameters().bits_per_sample(),
+ audio_input_parameters().frames_per_buffer(),
effects);
return params;
}
+ void GetDefaultInputStreamParametersOnAudioThread() {
+ RunOnAudioThread(
+ base::Bind(&AudioAndroidInputTest::GetDefaultInputStreamParameters,
+ base::Unretained(this)));
+ }
+
+ void MakeAudioInputStreamOnAudioThread(const AudioParameters& params) {
+ RunOnAudioThread(
+ base::Bind(&AudioAndroidInputTest::MakeInputStream,
+ base::Unretained(this),
+ params));
+ }
+
+ void OpenAndCloseAudioInputStreamOnAudioThread() {
+ RunOnAudioThread(
+ base::Bind(&AudioAndroidInputTest::OpenAndClose,
+ base::Unretained(this)));
+ }
+
+ void OpenAndStartAudioInputStreamOnAudioThread(
+ AudioInputStream::AudioInputCallback* sink) {
+ RunOnAudioThread(
+ base::Bind(&AudioAndroidInputTest::OpenAndStart,
+ base::Unretained(this),
+ sink));
+ }
+
+ void StopAndCloseAudioInputStreamOnAudioThread() {
+ RunOnAudioThread(
+ base::Bind(&AudioAndroidInputTest::StopAndClose,
+ base::Unretained(this)));
+ }
+
void StartInputStreamCallbacks(const AudioParameters& params) {
double expected_time_between_callbacks_ms =
ExpectedTimeBetweenCallbacks(params);
const int num_callbacks =
(kCallbackTestTimeMs / expected_time_between_callbacks_ms);
- AudioInputStream* stream = audio_manager()->MakeAudioInputStream(
- params, AudioManagerBase::kDefaultDeviceId);
- EXPECT_TRUE(stream);
+
+ MakeAudioInputStreamOnAudioThread(params);
int count = 0;
MockAudioInputCallback sink;
- EXPECT_CALL(sink,
- OnData(stream, NotNull(), params.GetBytesPerBuffer(), _, _))
+ EXPECT_CALL(sink, OnData(audio_input_stream_, NotNull(), _, _))
.Times(AtLeast(num_callbacks))
.WillRepeatedly(
- CheckCountAndPostQuitTask(&count, num_callbacks, loop()));
- EXPECT_CALL(sink, OnError(stream)).Times(0);
- EXPECT_CALL(sink, OnClose(stream)).Times(1);
+ CheckCountAndPostQuitTask(&count, num_callbacks, loop()));
+ EXPECT_CALL(sink, OnError(audio_input_stream_)).Times(0);
+
+ OpenAndStartAudioInputStreamOnAudioThread(&sink);
- EXPECT_TRUE(stream->Open());
- stream->Start(&sink);
start_time_ = base::TimeTicks::Now();
loop()->Run();
end_time_ = base::TimeTicks::Now();
- stream->Stop();
- stream->Close();
+
+ StopAndCloseAudioInputStreamOnAudioThread();
double average_time_between_callbacks_ms =
AverageTimeBetweenCallbacks(num_callbacks);
@@ -536,6 +676,41 @@ class AudioAndroidInputTest : public AudioAndroidOutputTest,
1.30 * expected_time_between_callbacks_ms);
}
+ void GetDefaultInputStreamParameters() {
+ DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ audio_input_parameters_ = audio_manager()->GetInputStreamParameters(
+ AudioManagerBase::kDefaultDeviceId);
+ }
+
+ void MakeInputStream(const AudioParameters& params) {
+ DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ audio_input_stream_ = audio_manager()->MakeAudioInputStream(
+ params, AudioManagerBase::kDefaultDeviceId);
+ EXPECT_TRUE(audio_input_stream_);
+ }
+
+ void OpenAndClose() {
+ DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ EXPECT_TRUE(audio_input_stream_->Open());
+ audio_input_stream_->Close();
+ audio_input_stream_ = NULL;
+ }
+
+ void OpenAndStart(AudioInputStream::AudioInputCallback* sink) {
+ DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ EXPECT_TRUE(audio_input_stream_->Open());
+ audio_input_stream_->Start(sink);
+ }
+
+ void StopAndClose() {
+ DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ audio_input_stream_->Stop();
+ audio_input_stream_->Close();
+ audio_input_stream_ = NULL;
+ }
+
+ AudioInputStream* audio_input_stream_;
+ AudioParameters audio_input_parameters_;
private:
DISALLOW_COPY_AND_ASSIGN(AudioAndroidInputTest);
@@ -545,35 +720,48 @@ class AudioAndroidInputTest : public AudioAndroidOutputTest,
TEST_P(AudioAndroidInputTest, GetDefaultInputStreamParameters) {
// We don't go through AudioAndroidInputTest::GetInputStreamParameters() here
// so that we can log the real (non-overridden) values of the effects.
- AudioParameters params = audio_manager()->GetInputStreamParameters(
- AudioManagerBase::kDefaultDeviceId);
- EXPECT_TRUE(params.IsValid());
- VLOG(1) << params;
+ GetDefaultInputStreamParametersOnAudioThread();
+ EXPECT_TRUE(audio_input_parameters().IsValid());
+ VLOG(1) << audio_input_parameters();
}
// Get the default audio output parameters and log the result.
TEST_F(AudioAndroidOutputTest, GetDefaultOutputStreamParameters) {
- AudioParameters params = GetDefaultOutputStreamParameters();
- EXPECT_TRUE(params.IsValid());
- VLOG(1) << params;
+ GetDefaultOutputStreamParametersOnAudioThread();
+ VLOG(1) << audio_output_parameters();
+}
+
+// Verify input device enumeration.
+TEST_F(AudioAndroidInputTest, GetAudioInputDeviceNames) {
+ if (!audio_manager()->HasAudioInputDevices())
+ return;
+ AudioDeviceNames devices;
+ RunOnAudioThread(
+ base::Bind(&AudioManager::GetAudioInputDeviceNames,
+ base::Unretained(audio_manager()),
+ &devices));
+ CheckDeviceNames(devices);
}
-// Check if low-latency output is supported and log the result as output.
-TEST_F(AudioAndroidOutputTest, IsAudioLowLatencySupported) {
- AudioManagerAndroid* manager =
- static_cast<AudioManagerAndroid*>(audio_manager());
- bool low_latency = manager->IsAudioLowLatencySupported();
- low_latency ? VLOG(0) << "Low latency output is supported"
- : VLOG(0) << "Low latency output is *not* supported";
+// Verify output device enumeration.
+TEST_F(AudioAndroidOutputTest, GetAudioOutputDeviceNames) {
+ if (!audio_manager()->HasAudioOutputDevices())
+ return;
+ AudioDeviceNames devices;
+ RunOnAudioThread(
+ base::Bind(&AudioManager::GetAudioOutputDeviceNames,
+ base::Unretained(audio_manager()),
+ &devices));
+ CheckDeviceNames(devices);
}
// Ensure that a default input stream can be created and closed.
TEST_P(AudioAndroidInputTest, CreateAndCloseInputStream) {
AudioParameters params = GetInputStreamParameters();
- AudioInputStream* ais = audio_manager()->MakeAudioInputStream(
- params, AudioManagerBase::kDefaultDeviceId);
- EXPECT_TRUE(ais);
- ais->Close();
+ MakeAudioInputStreamOnAudioThread(params);
+ RunOnAudioThread(
+ base::Bind(&AudioInputStream::Close,
+ base::Unretained(audio_input_stream_)));
}
// Ensure that a default output stream can be created and closed.
@@ -581,45 +769,39 @@ TEST_P(AudioAndroidInputTest, CreateAndCloseInputStream) {
// to communication mode, and calls RegisterHeadsetReceiver, the first time
// it is called?
TEST_F(AudioAndroidOutputTest, CreateAndCloseOutputStream) {
- AudioParameters params = GetDefaultOutputStreamParameters();
- AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
- params, std::string(), std::string());
- EXPECT_TRUE(aos);
- aos->Close();
+ GetDefaultOutputStreamParametersOnAudioThread();
+ MakeAudioOutputStreamOnAudioThread(audio_output_parameters());
+ RunOnAudioThread(
+ base::Bind(&AudioOutputStream::Close,
+ base::Unretained(audio_output_stream_)));
}
// Ensure that a default input stream can be opened and closed.
TEST_P(AudioAndroidInputTest, OpenAndCloseInputStream) {
AudioParameters params = GetInputStreamParameters();
- AudioInputStream* ais = audio_manager()->MakeAudioInputStream(
- params, AudioManagerBase::kDefaultDeviceId);
- EXPECT_TRUE(ais);
- EXPECT_TRUE(ais->Open());
- ais->Close();
+ MakeAudioInputStreamOnAudioThread(params);
+ OpenAndCloseAudioInputStreamOnAudioThread();
}
// Ensure that a default output stream can be opened and closed.
TEST_F(AudioAndroidOutputTest, OpenAndCloseOutputStream) {
- AudioParameters params = GetDefaultOutputStreamParameters();
- AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
- params, std::string(), std::string());
- EXPECT_TRUE(aos);
- EXPECT_TRUE(aos->Open());
- aos->Close();
+ GetDefaultOutputStreamParametersOnAudioThread();
+ MakeAudioOutputStreamOnAudioThread(audio_output_parameters());
+ OpenAndCloseAudioOutputStreamOnAudioThread();
}
// Start input streaming using default input parameters and ensure that the
// callback sequence is sane.
-TEST_P(AudioAndroidInputTest, StartInputStreamCallbacks) {
- AudioParameters params = GetInputStreamParameters();
- StartInputStreamCallbacks(params);
+TEST_P(AudioAndroidInputTest, DISABLED_StartInputStreamCallbacks) {
+ AudioParameters native_params = GetInputStreamParameters();
+ StartInputStreamCallbacks(native_params);
}
// Start input streaming using non default input parameters and ensure that the
// callback sequence is sane. The only change we make in this test is to select
// a 10ms buffer size instead of the default size.
-// TODO(henrika): possibly add support for more variations.
-TEST_P(AudioAndroidInputTest, StartInputStreamCallbacksNonDefaultParameters) {
+TEST_P(AudioAndroidInputTest,
+ DISABLED_StartInputStreamCallbacksNonDefaultParameters) {
AudioParameters native_params = GetInputStreamParameters();
AudioParameters params(native_params.format(),
native_params.channel_layout(),
@@ -634,8 +816,8 @@ TEST_P(AudioAndroidInputTest, StartInputStreamCallbacksNonDefaultParameters) {
// Start output streaming using default output parameters and ensure that the
// callback sequence is sane.
TEST_F(AudioAndroidOutputTest, StartOutputStreamCallbacks) {
- AudioParameters params = GetDefaultOutputStreamParameters();
- StartOutputStreamCallbacks(params);
+ GetDefaultOutputStreamParametersOnAudioThread();
+ StartOutputStreamCallbacks(audio_output_parameters());
}
// Start output streaming using non default output parameters and ensure that
@@ -643,13 +825,13 @@ TEST_F(AudioAndroidOutputTest, StartOutputStreamCallbacks) {
// select a 10ms buffer size instead of the default size and to open up the
// device in mono.
// TODO(henrika): possibly add support for more variations.
-TEST_F(AudioAndroidOutputTest, StartOutputStreamCallbacksNonDefaultParameters) {
- AudioParameters native_params = GetDefaultOutputStreamParameters();
- AudioParameters params(native_params.format(),
+TEST_F(AudioAndroidOutputTest, DISABLED_StartOutputStreamCallbacksNonDefaultParameters) {
+ GetDefaultOutputStreamParametersOnAudioThread();
+ AudioParameters params(audio_output_parameters().format(),
CHANNEL_LAYOUT_MONO,
- native_params.sample_rate(),
- native_params.bits_per_sample(),
- native_params.sample_rate() / 100);
+ audio_output_parameters().sample_rate(),
+ audio_output_parameters().bits_per_sample(),
+ audio_output_parameters().sample_rate() / 100);
StartOutputStreamCallbacks(params);
}
@@ -658,13 +840,12 @@ TEST_F(AudioAndroidOutputTest, StartOutputStreamCallbacksNonDefaultParameters) {
// NOTE: this test requires user interaction and is not designed to run as an
// automatized test on bots.
TEST_F(AudioAndroidOutputTest, DISABLED_RunOutputStreamWithFileAsSource) {
- AudioParameters params = GetDefaultOutputStreamParameters();
- VLOG(1) << params;
- AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
- params, std::string(), std::string());
- EXPECT_TRUE(aos);
+ GetDefaultOutputStreamParametersOnAudioThread();
+ VLOG(1) << audio_output_parameters();
+ MakeAudioOutputStreamOnAudioThread(audio_output_parameters());
std::string file_name;
+ const AudioParameters params = audio_output_parameters();
if (params.sample_rate() == 48000 && params.channels() == 2) {
file_name = kSpeechFile_16b_s_48k;
} else if (params.sample_rate() == 48000 && params.channels() == 1) {
@@ -681,13 +862,10 @@ TEST_F(AudioAndroidOutputTest, DISABLED_RunOutputStreamWithFileAsSource) {
base::WaitableEvent event(false, false);
FileAudioSource source(&event, file_name);
- EXPECT_TRUE(aos->Open());
- aos->SetVolume(1.0);
- aos->Start(&source);
+ OpenAndStartAudioOutputStreamOnAudioThread(&source);
VLOG(0) << ">> Verify that the file is played out correctly...";
EXPECT_TRUE(event.TimedWait(TestTimeouts::action_max_timeout()));
- aos->Stop();
- aos->Close();
+ StopAndCloseAudioOutputStreamOnAudioThread();
}
// Start input streaming and run it for ten seconds while recording to a
@@ -697,9 +875,7 @@ TEST_F(AudioAndroidOutputTest, DISABLED_RunOutputStreamWithFileAsSource) {
TEST_P(AudioAndroidInputTest, DISABLED_RunSimplexInputStreamWithFileAsSink) {
AudioParameters params = GetInputStreamParameters();
VLOG(1) << params;
- AudioInputStream* ais = audio_manager()->MakeAudioInputStream(
- params, AudioManagerBase::kDefaultDeviceId);
- EXPECT_TRUE(ais);
+ MakeAudioInputStreamOnAudioThread(params);
std::string file_name = base::StringPrintf("out_simplex_%d_%d_%d.pcm",
params.sample_rate(),
@@ -709,12 +885,10 @@ TEST_P(AudioAndroidInputTest, DISABLED_RunSimplexInputStreamWithFileAsSink) {
base::WaitableEvent event(false, false);
FileAudioSink sink(&event, params, file_name);
- EXPECT_TRUE(ais->Open());
- ais->Start(&sink);
+ OpenAndStartAudioInputStreamOnAudioThread(&sink);
VLOG(0) << ">> Speak into the microphone to record audio...";
EXPECT_TRUE(event.TimedWait(TestTimeouts::action_max_timeout()));
- ais->Stop();
- ais->Close();
+ StopAndCloseAudioInputStreamOnAudioThread();
}
// Same test as RunSimplexInputStreamWithFileAsSink but this time output
@@ -723,15 +897,12 @@ TEST_P(AudioAndroidInputTest, DISABLED_RunSimplexInputStreamWithFileAsSink) {
// automatized test on bots.
TEST_P(AudioAndroidInputTest, DISABLED_RunDuplexInputStreamWithFileAsSink) {
AudioParameters in_params = GetInputStreamParameters();
- AudioInputStream* ais = audio_manager()->MakeAudioInputStream(
- in_params, AudioManagerBase::kDefaultDeviceId);
- EXPECT_TRUE(ais);
+ VLOG(1) << in_params;
+ MakeAudioInputStreamOnAudioThread(in_params);
- AudioParameters out_params =
- audio_manager()->GetDefaultOutputStreamParameters();
- AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
- out_params, std::string(), std::string());
- EXPECT_TRUE(aos);
+ GetDefaultOutputStreamParametersOnAudioThread();
+ VLOG(1) << audio_output_parameters();
+ MakeAudioOutputStreamOnAudioThread(audio_output_parameters());
std::string file_name = base::StringPrintf("out_duplex_%d_%d_%d.pcm",
in_params.sample_rate(),
@@ -740,23 +911,18 @@ TEST_P(AudioAndroidInputTest, DISABLED_RunDuplexInputStreamWithFileAsSink) {
base::WaitableEvent event(false, false);
FileAudioSink sink(&event, in_params, file_name);
- MockAudioOutputCallback source;
+ MockAudioSourceCallback source;
- EXPECT_CALL(source, OnMoreData(NotNull(), _)).WillRepeatedly(
- Invoke(&source, &MockAudioOutputCallback::RealOnMoreData));
- EXPECT_CALL(source, OnError(aos)).Times(0);
- EXPECT_CALL(source, OnMoreIOData(_, _, _)).Times(0);
+ EXPECT_CALL(source, OnMoreData(NotNull(), _))
+ .WillRepeatedly(Invoke(RealOnMoreData));
+ EXPECT_CALL(source, OnError(audio_output_stream_)).Times(0);
- EXPECT_TRUE(ais->Open());
- EXPECT_TRUE(aos->Open());
- ais->Start(&sink);
- aos->Start(&source);
+ OpenAndStartAudioInputStreamOnAudioThread(&sink);
+ OpenAndStartAudioOutputStreamOnAudioThread(&source);
VLOG(0) << ">> Speak into the microphone to record audio";
EXPECT_TRUE(event.TimedWait(TestTimeouts::action_max_timeout()));
- aos->Stop();
- ais->Stop();
- aos->Close();
- ais->Close();
+ StopAndCloseAudioOutputStreamOnAudioThread();
+ StopAndCloseAudioInputStreamOnAudioThread();
}
// Start audio in both directions while feeding captured data into a FIFO so
@@ -776,18 +942,17 @@ TEST_P(AudioAndroidInputTest,
// audio on Android.
AudioParameters io_params(default_input_params.format(),
default_input_params.channel_layout(),
+ ChannelLayoutToChannelCount(
+ default_input_params.channel_layout()),
default_input_params.sample_rate(),
default_input_params.bits_per_sample(),
- default_input_params.sample_rate() / 100);
+ default_input_params.sample_rate() / 100,
+ default_input_params.effects());
VLOG(1) << io_params;
// Create input and output streams using the common audio parameters.
- AudioInputStream* ais = audio_manager()->MakeAudioInputStream(
- io_params, AudioManagerBase::kDefaultDeviceId);
- EXPECT_TRUE(ais);
- AudioOutputStream* aos = audio_manager()->MakeAudioOutputStream(
- io_params, std::string(), std::string());
- EXPECT_TRUE(aos);
+ MakeAudioInputStreamOnAudioThread(io_params);
+ MakeAudioOutputStreamOnAudioThread(io_params);
FullDuplexAudioSinkSource full_duplex(io_params);
@@ -795,20 +960,16 @@ TEST_P(AudioAndroidInputTest,
// delay we should expect from the FIFO. If real-time delay measurements are
// performed, the result should be reduced by this extra delay since it is
// something that has been added by the test.
- EXPECT_TRUE(ais->Open());
- EXPECT_TRUE(aos->Open());
- ais->Start(&full_duplex);
- aos->Start(&full_duplex);
+ OpenAndStartAudioInputStreamOnAudioThread(&full_duplex);
+ OpenAndStartAudioOutputStreamOnAudioThread(&full_duplex);
VLOG(1) << "HINT: an estimate of the extra FIFO delay will be updated "
<< "once per second during this test.";
VLOG(0) << ">> Speak into the mic and listen to the audio in loopback...";
fflush(stdout);
base::PlatformThread::Sleep(base::TimeDelta::FromSeconds(20));
printf("\n");
- aos->Stop();
- ais->Stop();
- aos->Close();
- ais->Close();
+ StopAndCloseAudioOutputStreamOnAudioThread();
+ StopAndCloseAudioInputStreamOnAudioThread();
}
INSTANTIATE_TEST_CASE_P(AudioAndroidInputTest, AudioAndroidInputTest,
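
Much of the test churn above comes from routing every stream operation through RunOnAudioThread(), which posts a closure to the audio manager's task runner and blocks on a WaitableEvent until it has run, or runs it inline when already on that thread. A standalone sketch of that pattern using only the standard library (MiniTaskRunner and RunOnThreadAndWait are illustrative stand-ins, not Chromium classes; std::promise plays the role of base::WaitableEvent):

#include <condition_variable>
#include <deque>
#include <functional>
#include <future>
#include <mutex>
#include <thread>

// Minimal single-thread task queue standing in for the audio thread's
// SingleThreadTaskRunner; it offers the two calls the test helper relies on.
class MiniTaskRunner {
 public:
  MiniTaskRunner() : worker_([this] { Loop(); }) {}
  ~MiniTaskRunner() {
    PostTask(nullptr);  // Empty task acts as the shutdown sentinel.
    worker_.join();
  }
  void PostTask(std::function<void()> task) {
    std::lock_guard<std::mutex> lock(mutex_);
    tasks_.push_back(std::move(task));
    cv_.notify_one();
  }
  bool BelongsToCurrentThread() const {
    return std::this_thread::get_id() == worker_.get_id();
  }

 private:
  void Loop() {
    for (;;) {
      std::function<void()> task;
      {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return !tasks_.empty(); });
        task = std::move(tasks_.front());
        tasks_.pop_front();
      }
      if (!task)
        return;
      task();
    }
  }
  std::mutex mutex_;
  std::condition_variable cv_;
  std::deque<std::function<void()>> tasks_;
  std::thread worker_;  // Declared last so the queue exists before it starts.
};

// Counterpart of the test's RunOnAudioThread(): run |closure| on the runner's
// thread and block the caller until it has finished, unless the caller is
// already on that thread.
void RunOnThreadAndWait(MiniTaskRunner* runner, std::function<void()> closure) {
  if (runner->BelongsToCurrentThread()) {
    closure();
    return;
  }
  std::promise<void> done;  // Stands in for base::WaitableEvent.
  runner->PostTask([&] {
    closure();
    done.set_value();  // Equivalent of event.Signal() in the test.
  });
  done.get_future().wait();  // Equivalent of event.Wait().
}

int main() {
  MiniTaskRunner audio_thread;
  int value = 0;
  RunOnThreadAndWait(&audio_thread, [&] { value = 42; });
  return value == 42 ? 0 : 1;
}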
diff --git a/chromium/media/audio/android/audio_manager_android.cc b/chromium/media/audio/android/audio_manager_android.cc
index 3464d89a30f..48f203ab74e 100644
--- a/chromium/media/audio/android/audio_manager_android.cc
+++ b/chromium/media/audio/android/audio_manager_android.cc
@@ -9,6 +9,7 @@
#include "base/android/jni_string.h"
#include "base/android/scoped_java_ref.h"
#include "base/logging.h"
+#include "base/message_loop/message_loop.h"
#include "base/strings/string_number_conversions.h"
#include "jni/AudioManagerAndroid_jni.h"
#include "media/audio/android/audio_record_input.h"
@@ -37,9 +38,6 @@ static void AddDefaultDevice(AudioDeviceNames* device_names) {
// Maximum number of output streams that can be open simultaneously.
static const int kMaxOutputStreams = 10;
-static const int kAudioModeNormal = 0x00000000;
-static const int kAudioModeInCommunication = 0x00000003;
-
static const int kDefaultInputBufferSize = 1024;
static const int kDefaultOutputBufferSize = 2048;
@@ -48,19 +46,26 @@ AudioManager* CreateAudioManager(AudioLogFactory* audio_log_factory) {
}
AudioManagerAndroid::AudioManagerAndroid(AudioLogFactory* audio_log_factory)
- : AudioManagerBase(audio_log_factory) {
+ : AudioManagerBase(audio_log_factory),
+ communication_mode_is_on_(false) {
SetMaxOutputStreamsAllowed(kMaxOutputStreams);
- j_audio_manager_.Reset(
- Java_AudioManagerAndroid_createAudioManagerAndroid(
- base::android::AttachCurrentThread(),
- base::android::GetApplicationContext(),
- reinterpret_cast<intptr_t>(this)));
- Init();
+ // WARNING: This is executed on the UI loop, do not add any code here which
+ // loads libraries or attempts to call out into the OS. Instead add such code
+ // to the InitializeOnAudioThread() method below.
+
+ // Task must be posted last to avoid races from handing out "this" to the
+ // audio thread.
+ GetTaskRunner()->PostTask(FROM_HERE, base::Bind(
+ &AudioManagerAndroid::InitializeOnAudioThread,
+ base::Unretained(this)));
}
AudioManagerAndroid::~AudioManagerAndroid() {
- Close();
+ // It's safe to post a task here since Shutdown() will wait for all tasks to
+ // complete before returning.
+ GetTaskRunner()->PostTask(FROM_HERE, base::Bind(
+ &AudioManagerAndroid::ShutdownOnAudioThread, base::Unretained(this)));
Shutdown();
}
@@ -74,13 +79,22 @@ bool AudioManagerAndroid::HasAudioInputDevices() {
void AudioManagerAndroid::GetAudioInputDeviceNames(
AudioDeviceNames* device_names) {
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
+
// Always add default device parameters as first element.
+ DCHECK(device_names->empty());
AddDefaultDevice(device_names);
+ // Get list of available audio devices.
JNIEnv* env = AttachCurrentThread();
ScopedJavaLocalRef<jobjectArray> j_device_array =
Java_AudioManagerAndroid_getAudioInputDeviceNames(
env, j_audio_manager_.obj());
+ if (j_device_array.is_null()) {
+ // Most probable reason for a NULL result here is that the process lacks
+ // MODIFY_AUDIO_SETTINGS or RECORD_AUDIO permissions.
+ return;
+ }
jsize len = env->GetArrayLength(j_device_array.obj());
AudioDeviceName device;
for (jsize i = 0; i < len; ++i) {
@@ -104,76 +118,96 @@ void AudioManagerAndroid::GetAudioOutputDeviceNames(
AudioParameters AudioManagerAndroid::GetInputStreamParameters(
const std::string& device_id) {
- JNIEnv* env = AttachCurrentThread();
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
+
// Use mono as preferred number of input channels on Android to save
// resources. Using mono also avoids a driver issue seen on Samsung
// Galaxy S3 and S4 devices. See http://crbug.com/256851 for details.
+ JNIEnv* env = AttachCurrentThread();
ChannelLayout channel_layout = CHANNEL_LAYOUT_MONO;
int buffer_size = Java_AudioManagerAndroid_getMinInputFrameSize(
env, GetNativeOutputSampleRate(),
ChannelLayoutToChannelCount(channel_layout));
+ buffer_size = buffer_size <= 0 ? kDefaultInputBufferSize : buffer_size;
int effects = AudioParameters::NO_EFFECTS;
effects |= Java_AudioManagerAndroid_shouldUseAcousticEchoCanceler(env) ?
AudioParameters::ECHO_CANCELLER : AudioParameters::NO_EFFECTS;
+
+ int user_buffer_size = GetUserBufferSize();
+ if (user_buffer_size)
+ buffer_size = user_buffer_size;
+
AudioParameters params(
AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout, 0,
- GetNativeOutputSampleRate(), 16,
- buffer_size <= 0 ? kDefaultInputBufferSize : buffer_size, effects);
+ GetNativeOutputSampleRate(), 16, buffer_size, effects);
return params;
}
AudioOutputStream* AudioManagerAndroid::MakeAudioOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) {
+ const std::string& device_id) {
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
AudioOutputStream* stream =
- AudioManagerBase::MakeAudioOutputStream(params, std::string(),
- std::string());
- if (stream && output_stream_count() == 1) {
- SetAudioMode(kAudioModeInCommunication);
- }
-
- {
- base::AutoLock lock(streams_lock_);
- streams_.insert(static_cast<OpenSLESOutputStream*>(stream));
- }
-
+ AudioManagerBase::MakeAudioOutputStream(params, std::string());
+ streams_.insert(static_cast<OpenSLESOutputStream*>(stream));
return stream;
}
AudioInputStream* AudioManagerAndroid::MakeAudioInputStream(
const AudioParameters& params, const std::string& device_id) {
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
+ bool has_no_input_streams = HasNoAudioInputStreams();
AudioInputStream* stream =
AudioManagerBase::MakeAudioInputStream(params, device_id);
+
+ // The audio manager for Android creates streams intended for real-time
+ // VoIP sessions and therefore sets the audio mode to MODE_IN_COMMUNICATION.
+ // If a Bluetooth headset is used, the audio stream will use the SCO
+ // channel and therefore have a limited bandwidth (8 kHz).
+ if (stream && has_no_input_streams) {
+ communication_mode_is_on_ = true;
+ SetCommunicationAudioModeOn(true);
+ }
return stream;
}
void AudioManagerAndroid::ReleaseOutputStream(AudioOutputStream* stream) {
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
AudioManagerBase::ReleaseOutputStream(stream);
- if (!output_stream_count()) {
- SetAudioMode(kAudioModeNormal);
- }
- base::AutoLock lock(streams_lock_);
streams_.erase(static_cast<OpenSLESOutputStream*>(stream));
}
void AudioManagerAndroid::ReleaseInputStream(AudioInputStream* stream) {
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
+ DCHECK(!j_audio_manager_.is_null());
AudioManagerBase::ReleaseInputStream(stream);
+
+ // Restore the audio mode which was used before the first communication-
+ // mode stream was created.
+ if (HasNoAudioInputStreams()) {
+ communication_mode_is_on_ = false;
+ SetCommunicationAudioModeOn(false);
+ }
}
AudioOutputStream* AudioManagerAndroid::MakeLinearOutputStream(
const AudioParameters& params) {
DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
- return new OpenSLESOutputStream(this, params);
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
+ return new OpenSLESOutputStream(this, params, SL_ANDROID_STREAM_MEDIA);
}
AudioOutputStream* AudioManagerAndroid::MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) {
+ const std::string& device_id) {
DLOG_IF(ERROR, !device_id.empty()) << "Not implemented!";
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
- return new OpenSLESOutputStream(this, params);
+
+ // Set stream type which matches the current system-wide audio mode used by
+ // the Android audio manager.
+ const SLint32 stream_type = communication_mode_is_on_ ?
+ SL_ANDROID_STREAM_VOICE : SL_ANDROID_STREAM_MEDIA;
+ return new OpenSLESOutputStream(this, params, stream_type);
}
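A condensed view of the coupling described above (ChooseStreamType() is an
illustrative helper, not a function in this patch): the first input stream
switches Android into communication mode, and output streams created while the
flag is set use the VOICE stream type so playback follows the same, possibly
Bluetooth SCO, routing as capture.

  #include <SLES/OpenSLES.h>
  #include <SLES/OpenSLES_Android.h>

  SLint32 ChooseStreamType(bool communication_mode_is_on) {
    // Mirrors the selection made in MakeLowLatencyOutputStream() above.
    return communication_mode_is_on ? SL_ANDROID_STREAM_VOICE
                                    : SL_ANDROID_STREAM_MEDIA;
  }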
AudioInputStream* AudioManagerAndroid::MakeLinearInputStream(
@@ -187,13 +221,18 @@ AudioInputStream* AudioManagerAndroid::MakeLinearInputStream(
AudioInputStream* AudioManagerAndroid::MakeLowLatencyInputStream(
const AudioParameters& params, const std::string& device_id) {
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
DLOG_IF(ERROR, device_id.empty()) << "Invalid device ID!";
- // Utilize the device ID to select the correct input device.
+
+ // Use the device ID to select the correct input device.
// Note that the input device is always associated with a certain output
// device, i.e., this selection does also switch the output device.
// All input and output streams will be affected by the device selection.
- SetAudioDevice(device_id);
+ if (!SetAudioDevice(device_id)) {
+ LOG(ERROR) << "Unable to select audio device!";
+ return NULL;
+ }
if (params.effects() != AudioParameters::NO_EFFECTS) {
// Platform effects can only be enabled through the AudioRecord path.
@@ -211,22 +250,25 @@ AudioInputStream* AudioManagerAndroid::MakeLowLatencyInputStream(
return new OpenSLESInputStream(this, params);
}
-int AudioManagerAndroid::GetOptimalOutputFrameSize(int sample_rate,
- int channels) {
- if (IsAudioLowLatencySupported()) {
- return GetAudioLowLatencyOutputFrameSize();
- } else {
- return std::max(kDefaultOutputBufferSize,
- Java_AudioManagerAndroid_getMinOutputFrameSize(
- base::android::AttachCurrentThread(),
- sample_rate, channels));
- }
+// static
+bool AudioManagerAndroid::RegisterAudioManager(JNIEnv* env) {
+ return RegisterNativesImpl(env);
+}
+
+void AudioManagerAndroid::SetMute(JNIEnv* env, jobject obj, jboolean muted) {
+ GetTaskRunner()->PostTask(
+ FROM_HERE,
+ base::Bind(
+ &AudioManagerAndroid::DoSetMuteOnAudioThread,
+ base::Unretained(this),
+ muted));
}
AudioParameters AudioManagerAndroid::GetPreferredOutputStreamParameters(
const std::string& output_device_id,
const AudioParameters& input_params) {
// TODO(tommi): Support |output_device_id|.
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
DLOG_IF(ERROR, !output_device_id.empty()) << "Not implemented!";
ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
int sample_rate = GetNativeOutputSampleRate();
@@ -252,57 +294,55 @@ AudioParameters AudioManagerAndroid::GetPreferredOutputStreamParameters(
sample_rate, bits_per_sample, buffer_size, AudioParameters::NO_EFFECTS);
}
-// static
-bool AudioManagerAndroid::RegisterAudioManager(JNIEnv* env) {
- return RegisterNativesImpl(env);
+bool AudioManagerAndroid::HasNoAudioInputStreams() {
+ return input_stream_count() == 0;
}
-void AudioManagerAndroid::Init() {
+void AudioManagerAndroid::InitializeOnAudioThread() {
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
+
+ // Create the Android audio manager on the audio thread.
+ DVLOG(2) << "Creating Java part of the audio manager";
+ j_audio_manager_.Reset(
+ Java_AudioManagerAndroid_createAudioManagerAndroid(
+ base::android::AttachCurrentThread(),
+ base::android::GetApplicationContext(),
+ reinterpret_cast<intptr_t>(this)));
+
+ // Prepare the list of audio devices and register receivers for device
+ // notifications.
Java_AudioManagerAndroid_init(
base::android::AttachCurrentThread(),
j_audio_manager_.obj());
}
-void AudioManagerAndroid::Close() {
+void AudioManagerAndroid::ShutdownOnAudioThread() {
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
+ DVLOG(2) << "Destroying Java part of the audio manager";
Java_AudioManagerAndroid_close(
base::android::AttachCurrentThread(),
j_audio_manager_.obj());
+ j_audio_manager_.Reset();
}
-void AudioManagerAndroid::SetMute(JNIEnv* env, jobject obj, jboolean muted) {
- GetMessageLoop()->PostTask(
- FROM_HERE,
- base::Bind(
- &AudioManagerAndroid::DoSetMuteOnAudioThread,
- base::Unretained(this),
- muted));
-}
-
-void AudioManagerAndroid::DoSetMuteOnAudioThread(bool muted) {
- base::AutoLock lock(streams_lock_);
- for (OutputStreams::iterator it = streams_.begin();
- it != streams_.end(); ++it) {
- (*it)->SetMute(muted);
- }
-}
-
-void AudioManagerAndroid::SetAudioMode(int mode) {
- Java_AudioManagerAndroid_setMode(
+void AudioManagerAndroid::SetCommunicationAudioModeOn(bool on) {
+ Java_AudioManagerAndroid_setCommunicationAudioModeOn(
base::android::AttachCurrentThread(),
- j_audio_manager_.obj(), mode);
+ j_audio_manager_.obj(), on);
}
-void AudioManagerAndroid::SetAudioDevice(const std::string& device_id) {
- JNIEnv* env = AttachCurrentThread();
+bool AudioManagerAndroid::SetAudioDevice(const std::string& device_id) {
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
// Send the unique device ID to the Java audio manager and make the
// device switch. Provide an empty string to the Java audio manager
// if the default device is selected.
+ JNIEnv* env = AttachCurrentThread();
ScopedJavaLocalRef<jstring> j_device_id = ConvertUTF8ToJavaString(
env,
device_id == AudioManagerBase::kDefaultDeviceId ?
std::string() : device_id);
- Java_AudioManagerAndroid_setDevice(
+ return Java_AudioManagerAndroid_setDevice(
env, j_audio_manager_.obj(), j_device_id.obj());
}
@@ -324,4 +364,23 @@ int AudioManagerAndroid::GetAudioLowLatencyOutputFrameSize() {
j_audio_manager_.obj());
}
+int AudioManagerAndroid::GetOptimalOutputFrameSize(int sample_rate,
+ int channels) {
+ if (IsAudioLowLatencySupported())
+ return GetAudioLowLatencyOutputFrameSize();
+
+ return std::max(kDefaultOutputBufferSize,
+ Java_AudioManagerAndroid_getMinOutputFrameSize(
+ base::android::AttachCurrentThread(),
+ sample_rate, channels));
+}
+
+void AudioManagerAndroid::DoSetMuteOnAudioThread(bool muted) {
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
+ for (OutputStreams::iterator it = streams_.begin();
+ it != streams_.end(); ++it) {
+ (*it)->SetMute(muted);
+ }
+}
+
} // namespace media
diff --git a/chromium/media/audio/android/audio_manager_android.h b/chromium/media/audio/android/audio_manager_android.h
index 2900c0f8e29..ee5ad28e36e 100644
--- a/chromium/media/audio/android/audio_manager_android.h
+++ b/chromium/media/audio/android/audio_manager_android.h
@@ -10,6 +10,7 @@
#include "base/android/jni_android.h"
#include "base/gtest_prod_util.h"
#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
#include "media/audio/audio_manager_base.h"
namespace media {
@@ -33,8 +34,7 @@ class MEDIA_EXPORT AudioManagerAndroid : public AudioManagerBase {
virtual AudioOutputStream* MakeAudioOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) OVERRIDE;
+ const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeAudioInputStream(
const AudioParameters& params,
const std::string& device_id) OVERRIDE;
@@ -46,8 +46,7 @@ class MEDIA_EXPORT AudioManagerAndroid : public AudioManagerBase {
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) OVERRIDE;
+ const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(
const AudioParameters& params,
const std::string& device_id) OVERRIDE;
@@ -67,10 +66,12 @@ class MEDIA_EXPORT AudioManagerAndroid : public AudioManagerBase {
const AudioParameters& input_params) OVERRIDE;
private:
- void Init();
- void Close();
- void SetAudioMode(int mode);
- void SetAudioDevice(const std::string& device_id);
+ void InitializeOnAudioThread();
+ void ShutdownOnAudioThread();
+
+ bool HasNoAudioInputStreams();
+ void SetCommunicationAudioModeOn(bool on);
+ bool SetAudioDevice(const std::string& device_id);
int GetNativeOutputSampleRate();
bool IsAudioLowLatencySupported();
int GetAudioLowLatencyOutputFrameSize();
@@ -78,18 +79,15 @@ class MEDIA_EXPORT AudioManagerAndroid : public AudioManagerBase {
void DoSetMuteOnAudioThread(bool muted);
- // Allow the AudioAndroidTest to access private methods.
- FRIEND_TEST_ALL_PREFIXES(AudioAndroidOutputTest, IsAudioLowLatencySupported);
-
// Java AudioManager instance.
base::android::ScopedJavaGlobalRef<jobject> j_audio_manager_;
typedef std::set<OpenSLESOutputStream*> OutputStreams;
OutputStreams streams_;
- // TODO(wjia): remove this lock once unit test modules are fixed to call
- // AudioManager::MakeAudioOutputStream on the audio thread. For now, this
- // lock is used to guard access to |streams_|.
- base::Lock streams_lock_;
+
+ // Set to true when the first input stream is created and back to false when
+ // the last input stream is destroyed. Also affects the stream type of output
+ // streams.
+ bool communication_mode_is_on_;
DISALLOW_COPY_AND_ASSIGN(AudioManagerAndroid);
};
diff --git a/chromium/media/audio/android/audio_record_input.cc b/chromium/media/audio/android/audio_record_input.cc
index 15a0c3d3b7b..3f19588b4a6 100644
--- a/chromium/media/audio/android/audio_record_input.cc
+++ b/chromium/media/audio/android/audio_record_input.cc
@@ -7,14 +7,18 @@
#include "base/logging.h"
#include "jni/AudioRecordInput_jni.h"
#include "media/audio/android/audio_manager_android.h"
+#include "media/base/audio_bus.h"
namespace media {
AudioRecordInputStream::AudioRecordInputStream(
- AudioManagerAndroid* audio_manager, const AudioParameters& params)
+ AudioManagerAndroid* audio_manager,
+ const AudioParameters& params)
: audio_manager_(audio_manager),
callback_(NULL),
- direct_buffer_address_(NULL) {
+ direct_buffer_address_(NULL),
+ audio_bus_(media::AudioBus::Create(params)),
+ bytes_per_sample_(params.bits_per_sample() / 8) {
DVLOG(2) << __PRETTY_FUNCTION__;
DCHECK(params.IsValid());
j_audio_record_.Reset(
@@ -48,10 +52,13 @@ bool AudioRecordInputStream::RegisterAudioRecordInput(JNIEnv* env) {
void AudioRecordInputStream::OnData(JNIEnv* env, jobject obj, jint size,
jint hardware_delay_bytes) {
DCHECK(direct_buffer_address_);
+ DCHECK_EQ(size,
+ audio_bus_->frames() * audio_bus_->channels() * bytes_per_sample_);
// Passing zero as the volume parameter indicates there is no access to a
// hardware volume slider.
- callback_->OnData(this, direct_buffer_address_, size, hardware_delay_bytes,
- 0.0);
+ audio_bus_->FromInterleaved(
+ direct_buffer_address_, audio_bus_->frames(), bytes_per_sample_);
+ callback_->OnData(this, audio_bus_.get(), hardware_delay_bytes, 0.0);
}
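For readers unfamiliar with media::AudioBus, FromInterleaved() above roughly
performs the following deinterleave-and-scale step for 16-bit input (a hedged
sketch, not the actual AudioBus implementation, which handles the scaling of
extreme sample values slightly differently):

  #include <stdint.h>

  #include "media/base/audio_bus.h"

  void DeinterleaveInt16ToFloat(const int16_t* interleaved,
                                int frames,
                                int channels,
                                media::AudioBus* bus) {
    for (int ch = 0; ch < channels; ++ch) {
      float* dest = bus->channel(ch);
      for (int i = 0; i < frames; ++i) {
        // Map [-32768, 32767] to approximately [-1.0, 1.0).
        dest[i] = interleaved[i * channels + ch] / 32768.0f;
      }
    }
  }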
bool AudioRecordInputStream::Open() {
@@ -90,8 +97,7 @@ void AudioRecordInputStream::Stop() {
base::android::AttachCurrentThread(), j_audio_record_.obj());
// The Java thread must have been stopped at this point, so we are free to
- // set |callback_|.
- callback_->OnClose(this);
+ // clear |callback_|.
callback_ = NULL;
}
diff --git a/chromium/media/audio/android/audio_record_input.h b/chromium/media/audio/android/audio_record_input.h
index 0a2578b1079..c240038360b 100644
--- a/chromium/media/audio/android/audio_record_input.h
+++ b/chromium/media/audio/android/audio_record_input.h
@@ -12,6 +12,7 @@
namespace media {
+class AudioBus;
class AudioManagerAndroid;
// Implements PCM audio input support for Android using the Java AudioRecord
@@ -64,6 +65,9 @@ class MEDIA_EXPORT AudioRecordInputStream : public AudioInputStream {
// Owned by j_audio_record_.
uint8* direct_buffer_address_;
+ scoped_ptr<media::AudioBus> audio_bus_;
+ int bytes_per_sample_;
+
DISALLOW_COPY_AND_ASSIGN(AudioRecordInputStream);
};
diff --git a/chromium/media/audio/android/opensles_input.cc b/chromium/media/audio/android/opensles_input.cc
index e51ba4f3a97..1ef3aaca5ef 100644
--- a/chromium/media/audio/android/opensles_input.cc
+++ b/chromium/media/audio/android/opensles_input.cc
@@ -7,6 +7,7 @@
#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "media/audio/android/audio_manager_android.h"
+#include "media/base/audio_bus.h"
#define LOG_ON_FAILURE_AND_RETURN(op, ...) \
do { \
@@ -27,7 +28,8 @@ OpenSLESInputStream::OpenSLESInputStream(AudioManagerAndroid* audio_manager,
simple_buffer_queue_(NULL),
active_buffer_index_(0),
buffer_size_bytes_(0),
- started_(false) {
+ started_(false),
+ audio_bus_(media::AudioBus::Create(params)) {
DVLOG(2) << __PRETTY_FUNCTION__;
format_.formatType = SL_DATAFORMAT_PCM;
format_.numChannels = static_cast<SLuint32>(params.channels());
@@ -132,6 +134,7 @@ void OpenSLESInputStream::Stop() {
(*simple_buffer_queue_)->Clear(simple_buffer_queue_));
started_ = false;
+ callback_ = NULL;
}
void OpenSLESInputStream::Close() {
@@ -141,15 +144,9 @@ void OpenSLESInputStream::Close() {
// Stop the stream if it is still recording.
Stop();
{
+ // TODO(henrika): Do we need to hold the lock here?
base::AutoLock lock(lock_);
- // TODO(henrika): we use |callback_| in Close() but |callback_| is set
- // in Start(). Hence, it should be cleared in Stop() and not used here.
- if (callback_) {
- callback_->OnClose(this);
- callback_ = NULL;
- }
-
// Destroy the buffer queue recorder object and invalidate all associated
// interfaces.
recorder_object_.Reset();
@@ -300,13 +297,14 @@ void OpenSLESInputStream::ReadBufferQueue() {
TRACE_EVENT0("audio", "OpenSLESOutputStream::ReadBufferQueue");
+ // Convert from interleaved format to deinterleaved audio bus format.
+ audio_bus_->FromInterleaved(audio_data_[active_buffer_index_],
+ audio_bus_->frames(),
+ format_.bitsPerSample / 8);
+
// TODO(henrika): Investigate if it is possible to get an accurate
// delay estimation.
- callback_->OnData(this,
- audio_data_[active_buffer_index_],
- buffer_size_bytes_,
- buffer_size_bytes_,
- 0.0);
+ callback_->OnData(this, audio_bus_.get(), buffer_size_bytes_, 0.0);
// Done with this buffer. Send it to device for recording.
SLresult err =
diff --git a/chromium/media/audio/android/opensles_input.h b/chromium/media/audio/android/opensles_input.h
index cb07d51f78b..288ab43425e 100644
--- a/chromium/media/audio/android/opensles_input.h
+++ b/chromium/media/audio/android/opensles_input.h
@@ -17,6 +17,7 @@
namespace media {
+class AudioBus;
class AudioManagerAndroid;
// Implements PCM audio input support for Android using the OpenSLES API.
@@ -94,6 +95,8 @@ class OpenSLESInputStream : public AudioInputStream {
bool started_;
+ scoped_ptr<media::AudioBus> audio_bus_;
+
DISALLOW_COPY_AND_ASSIGN(OpenSLESInputStream);
};
diff --git a/chromium/media/audio/android/opensles_output.cc b/chromium/media/audio/android/opensles_output.cc
index b71680f0a7e..41c03c7867a 100644
--- a/chromium/media/audio/android/opensles_output.cc
+++ b/chromium/media/audio/android/opensles_output.cc
@@ -20,8 +20,10 @@
namespace media {
OpenSLESOutputStream::OpenSLESOutputStream(AudioManagerAndroid* manager,
- const AudioParameters& params)
+ const AudioParameters& params,
+ SLint32 stream_type)
: audio_manager_(manager),
+ stream_type_(stream_type),
callback_(NULL),
player_(NULL),
simple_buffer_queue_(NULL),
@@ -30,7 +32,8 @@ OpenSLESOutputStream::OpenSLESOutputStream(AudioManagerAndroid* manager,
started_(false),
muted_(false),
volume_(1.0) {
- DVLOG(2) << "OpenSLESOutputStream::OpenSLESOutputStream()";
+ DVLOG(2) << "OpenSLESOutputStream::OpenSLESOutputStream("
+ << "stream_type=" << stream_type << ")";
format_.formatType = SL_DATAFORMAT_PCM;
format_.numChannels = static_cast<SLuint32>(params.channels());
// Provides sampling rate in milliHertz to OpenSLES.
@@ -248,11 +251,11 @@ bool OpenSLESOutputStream::CreatePlayer() {
player_object_.Get(), SL_IID_ANDROIDCONFIGURATION, &player_config),
false);
- SLint32 stream_type = SL_ANDROID_STREAM_VOICE;
+ // Set configuration using the stream type provided at construction.
LOG_ON_FAILURE_AND_RETURN(
(*player_config)->SetConfiguration(player_config,
SL_ANDROID_KEY_STREAM_TYPE,
- &stream_type,
+ &stream_type_,
sizeof(SLint32)),
false);
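For reference, the configuration step above follows the usual OpenSL ES pattern
on Android: the stream type is set through the Android configuration interface
before the player object is realized. A small illustrative helper, not part of
the patch:

  #include <SLES/OpenSLES.h>
  #include <SLES/OpenSLES_Android.h>

  // |player_config| must have been obtained via GetInterface() with
  // SL_IID_ANDROIDCONFIGURATION on a player that has not been realized yet.
  SLresult SetAndroidStreamType(SLAndroidConfigurationItf player_config,
                                SLint32 stream_type) {
    return (*player_config)->SetConfiguration(player_config,
                                              SL_ANDROID_KEY_STREAM_TYPE,
                                              &stream_type,
                                              sizeof(SLint32));
  }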
diff --git a/chromium/media/audio/android/opensles_output.h b/chromium/media/audio/android/opensles_output.h
index 623b0193894..b0b678cea6e 100644
--- a/chromium/media/audio/android/opensles_output.h
+++ b/chromium/media/audio/android/opensles_output.h
@@ -28,7 +28,8 @@ class OpenSLESOutputStream : public AudioOutputStream {
static const int kMaxNumOfBuffersInQueue = 2;
OpenSLESOutputStream(AudioManagerAndroid* manager,
- const AudioParameters& params);
+ const AudioParameters& params,
+ SLint32 stream_type);
virtual ~OpenSLESOutputStream();
@@ -77,6 +78,10 @@ class OpenSLESOutputStream : public AudioOutputStream {
AudioManagerAndroid* audio_manager_;
+ // Audio playback stream type.
+ // See SLES/OpenSLES_Android.h for details.
+ SLint32 stream_type_;
+
AudioSourceCallback* callback_;
// Shared engine interfaces for the app.
diff --git a/chromium/media/audio/audio_input_controller.cc b/chromium/media/audio/audio_input_controller.cc
index ef94d1274d6..490c62b3c16 100644
--- a/chromium/media/audio/audio_input_controller.cc
+++ b/chromium/media/audio/audio_input_controller.cc
@@ -5,24 +5,46 @@
#include "media/audio/audio_input_controller.h"
#include "base/bind.h"
+#include "base/strings/stringprintf.h"
#include "base/threading/thread_restrictions.h"
+#include "base/time/time.h"
#include "media/base/limits.h"
#include "media/base/scoped_histogram_timer.h"
#include "media/base/user_input_monitor.h"
+using base::TimeDelta;
+
namespace {
-const int kMaxInputChannels = 2;
+const int kMaxInputChannels = 3;
// TODO(henrika): remove usage of timers and add support for proper
// notification of when the input device is removed. This was originally added
// to resolve http://crbug.com/79936 for Windows platforms. This then caused
// breakage (very hard to repro bugs!) on other platforms: See
// http://crbug.com/226327 and http://crbug.com/230972.
+// Note also that the timer has been disabled on Mac due to
+// crbug.com/357501.
const int kTimerResetIntervalSeconds = 1;
// We have received reports that the timer can be too trigger happy on some
// Mac devices and the initial timer interval has therefore been increased
// from 1 second to 5 seconds.
const int kTimerInitialIntervalSeconds = 5;
+
+#if defined(AUDIO_POWER_MONITORING)
+// Time constant for AudioPowerMonitor.
+// The utilized smoothing factor (alpha) in the exponential filter is given
+// by 1-exp(-1/(fs*ts)), where fs is the sample rate in Hz and ts is the time
+// constant given by |kPowerMeasurementTimeConstantMilliseconds|.
+// Example: fs=44100, ts=10e-3 => alpha~0.002265
+//          fs=44100, ts=20e-3 => alpha~0.001133
+// A large smoothing factor corresponds to a faster filter response to input
+// changes since y(n)=alpha*x(n)+(1-alpha)*y(n-1), where x(n) is the input
+// and y(n) is the output.
+const int kPowerMeasurementTimeConstantMilliseconds = 10;
+
+// Time in seconds between two successive measurements of audio power levels.
+const int kPowerMonitorLogIntervalSeconds = 5;
+#endif
}
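The smoothing-factor comment above can be made concrete with a small helper
that evaluates the quoted formula (illustrative only, not part of the patch);
for fs = 44100 Hz and ts = 10 ms it returns roughly 0.0023.

  #include <cmath>

  // alpha = 1 - exp(-1 / (fs * ts)), the per-sample weight of the one-pole
  // filter y(n) = alpha * x(n) + (1 - alpha) * y(n - 1).
  double ExponentialSmoothingFactor(double sample_rate_hz,
                                    double time_constant_seconds) {
    return 1.0 - std::exp(-1.0 / (sample_rate_hz * time_constant_seconds));
  }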
namespace media {
@@ -33,20 +55,20 @@ AudioInputController::Factory* AudioInputController::factory_ = NULL;
AudioInputController::AudioInputController(EventHandler* handler,
SyncWriter* sync_writer,
UserInputMonitor* user_input_monitor)
- : creator_loop_(base::MessageLoopProxy::current()),
+ : creator_task_runner_(base::MessageLoopProxy::current()),
handler_(handler),
stream_(NULL),
data_is_active_(false),
- state_(kEmpty),
+ state_(CLOSED),
sync_writer_(sync_writer),
max_volume_(0.0),
user_input_monitor_(user_input_monitor),
prev_key_down_count_(0) {
- DCHECK(creator_loop_.get());
+ DCHECK(creator_task_runner_.get());
}
AudioInputController::~AudioInputController() {
- DCHECK(kClosed == state_ || kCreated == state_ || kEmpty == state_);
+ DCHECK_EQ(state_, CLOSED);
}
// static
@@ -68,11 +90,11 @@ scoped_refptr<AudioInputController> AudioInputController::Create(
scoped_refptr<AudioInputController> controller(
new AudioInputController(event_handler, NULL, user_input_monitor));
- controller->message_loop_ = audio_manager->GetMessageLoop();
+ controller->task_runner_ = audio_manager->GetTaskRunner();
// Create and open a new audio input stream from the existing
// audio-device thread.
- if (!controller->message_loop_->PostTask(FROM_HERE,
+ if (!controller->task_runner_->PostTask(FROM_HERE,
base::Bind(&AudioInputController::DoCreate, controller,
base::Unretained(audio_manager), params, device_id))) {
controller = NULL;
@@ -99,11 +121,11 @@ scoped_refptr<AudioInputController> AudioInputController::CreateLowLatency(
// the audio-manager thread.
scoped_refptr<AudioInputController> controller(
new AudioInputController(event_handler, sync_writer, user_input_monitor));
- controller->message_loop_ = audio_manager->GetMessageLoop();
+ controller->task_runner_ = audio_manager->GetTaskRunner();
// Create and open a new audio input stream from the existing
// audio-device thread. Use the provided audio-input device.
- if (!controller->message_loop_->PostTask(FROM_HERE,
+ if (!controller->task_runner_->PostTask(FROM_HERE,
base::Bind(&AudioInputController::DoCreate, controller,
base::Unretained(audio_manager), params, device_id))) {
controller = NULL;
@@ -114,7 +136,7 @@ scoped_refptr<AudioInputController> AudioInputController::CreateLowLatency(
// static
scoped_refptr<AudioInputController> AudioInputController::CreateForStream(
- const scoped_refptr<base::MessageLoopProxy>& message_loop,
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
EventHandler* event_handler,
AudioInputStream* stream,
SyncWriter* sync_writer,
@@ -126,14 +148,14 @@ scoped_refptr<AudioInputController> AudioInputController::CreateForStream(
// the audio-manager thread.
scoped_refptr<AudioInputController> controller(
new AudioInputController(event_handler, sync_writer, user_input_monitor));
- controller->message_loop_ = message_loop;
+ controller->task_runner_ = task_runner;
// TODO(miu): See TODO at top of file. Until that's resolved, we need to
// disable the error auto-detection here (since the audio mirroring
// implementation will reliably report error and close events). Note, of
// course, that we're assuming CreateForStream() has been called for the audio
// mirroring use case only.
- if (!controller->message_loop_->PostTask(
+ if (!controller->task_runner_->PostTask(
FROM_HERE,
base::Bind(&AudioInputController::DoCreateForStream, controller,
stream, false))) {
@@ -144,33 +166,45 @@ scoped_refptr<AudioInputController> AudioInputController::CreateForStream(
}
void AudioInputController::Record() {
- message_loop_->PostTask(FROM_HERE, base::Bind(
+ task_runner_->PostTask(FROM_HERE, base::Bind(
&AudioInputController::DoRecord, this));
}
void AudioInputController::Close(const base::Closure& closed_task) {
DCHECK(!closed_task.is_null());
- DCHECK(creator_loop_->BelongsToCurrentThread());
+ DCHECK(creator_task_runner_->BelongsToCurrentThread());
- message_loop_->PostTaskAndReply(
+ task_runner_->PostTaskAndReply(
FROM_HERE, base::Bind(&AudioInputController::DoClose, this), closed_task);
}
void AudioInputController::SetVolume(double volume) {
- message_loop_->PostTask(FROM_HERE, base::Bind(
+ task_runner_->PostTask(FROM_HERE, base::Bind(
&AudioInputController::DoSetVolume, this, volume));
}
void AudioInputController::SetAutomaticGainControl(bool enabled) {
- message_loop_->PostTask(FROM_HERE, base::Bind(
+ task_runner_->PostTask(FROM_HERE, base::Bind(
&AudioInputController::DoSetAutomaticGainControl, this, enabled));
}
void AudioInputController::DoCreate(AudioManager* audio_manager,
const AudioParameters& params,
const std::string& device_id) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
SCOPED_UMA_HISTOGRAM_TIMER("Media.AudioInputController.CreateTime");
+
+#if defined(AUDIO_POWER_MONITORING)
+ // Create the audio (power) level meter given the provided audio parameters.
+ // An AudioBus is also needed to wrap the raw data buffer from the native
+ // layer to match AudioPowerMonitor::Scan().
+ // TODO(henrika): Remove use of extra AudioBus. See http://crbug.com/375155.
+ audio_level_.reset(new media::AudioPowerMonitor(
+ params.sample_rate(),
+ TimeDelta::FromMilliseconds(kPowerMeasurementTimeConstantMilliseconds)));
+ audio_params_ = params;
+#endif
+
// TODO(miu): See TODO at top of file. Until that's resolved, assume all
// platform audio input requires the |no_data_timer_| be used to auto-detect
// errors. In reality, probably only Windows needs to be treated as
@@ -181,38 +215,49 @@ void AudioInputController::DoCreate(AudioManager* audio_manager,
void AudioInputController::DoCreateForStream(
AudioInputStream* stream_to_control, bool enable_nodata_timer) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(!stream_);
stream_ = stream_to_control;
if (!stream_) {
- handler_->OnError(this);
+ if (handler_)
+ handler_->OnError(this, STREAM_CREATE_ERROR);
return;
}
if (stream_ && !stream_->Open()) {
stream_->Close();
stream_ = NULL;
- handler_->OnError(this);
+ if (handler_)
+ handler_->OnError(this, STREAM_OPEN_ERROR);
return;
}
DCHECK(!no_data_timer_.get());
+
+ // The timer is enabled for logging purposes. The NO_DATA_ERROR triggered
+ // from the timer must be ignored by the EventHandler.
+ // TODO(henrika): remove usage of timer when it has been verified on Canary
+ // that we are safe doing so. Goal is to get rid of |no_data_timer_| and
+ // everything that is tied to it. crbug.com/357569.
+ enable_nodata_timer = true;
+
if (enable_nodata_timer) {
- // Create the data timer which will call DoCheckForNoData(). The timer
+ // Create the data timer which will call FirstCheckForNoData(). The timer
// is started in DoRecord() and restarted in each DoCheckForNoData()
// callback.
no_data_timer_.reset(new base::Timer(
FROM_HERE, base::TimeDelta::FromSeconds(kTimerInitialIntervalSeconds),
- base::Bind(&AudioInputController::DoCheckForNoData,
+ base::Bind(&AudioInputController::FirstCheckForNoData,
base::Unretained(this)), false));
} else {
DVLOG(1) << "Disabled: timer check for no data.";
}
- state_ = kCreated;
- handler_->OnCreated(this);
+ state_ = CREATED;
+ if (handler_)
+ handler_->OnCreated(this);
if (user_input_monitor_) {
user_input_monitor_->EnableKeyPressMonitoring();
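The timer usage above relies on base::Timer's one-shot semantics: constructed
with is_repeating set to false, Reset() (re)arms it and the callback fires once
when the delay elapses; re-arming from inside the callback turns it into a
poller. A hedged sketch of that pattern (OnNoDataDetected() is hypothetical,
not part of the patch):

  #include "base/bind.h"
  #include "base/location.h"
  #include "base/memory/scoped_ptr.h"
  #include "base/time/time.h"
  #include "base/timer/timer.h"

  void OnNoDataDetected() { /* hypothetical reaction to a silent period */ }

  scoped_ptr<base::Timer> MakeNoDataWatchdog() {
    scoped_ptr<base::Timer> timer(new base::Timer(
        FROM_HERE, base::TimeDelta::FromSeconds(1),
        base::Bind(&OnNoDataDetected), false /* is_repeating */));
    // Arm the first countdown; the real controller re-arms the timer from
    // within DoCheckForNoData().
    timer->Reset();
    return timer.Pass();
  }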
@@ -221,60 +266,62 @@ void AudioInputController::DoCreateForStream(
}
void AudioInputController::DoRecord() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
SCOPED_UMA_HISTOGRAM_TIMER("Media.AudioInputController.RecordTime");
- if (state_ != kCreated)
+ if (state_ != CREATED)
return;
{
base::AutoLock auto_lock(lock_);
- state_ = kRecording;
+ state_ = RECORDING;
}
if (no_data_timer_) {
// Start the data timer. Once |kTimerResetIntervalSeconds| have passed,
- // a callback to DoCheckForNoData() is made.
+ // a callback to FirstCheckForNoData() is made.
no_data_timer_->Reset();
}
stream_->Start(this);
- handler_->OnRecording(this);
+ if (handler_)
+ handler_->OnRecording(this);
}
void AudioInputController::DoClose() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
SCOPED_UMA_HISTOGRAM_TIMER("Media.AudioInputController.CloseTime");
+ if (state_ == CLOSED)
+ return;
+
// Delete the timer on the same thread that created it.
no_data_timer_.reset();
- if (state_ != kClosed) {
- DoStopCloseAndClearStream(NULL);
- SetDataIsActive(false);
+ DoStopCloseAndClearStream();
+ SetDataIsActive(false);
- if (LowLatencyMode()) {
- sync_writer_->Close();
- }
+ if (SharedMemoryAndSyncSocketMode())
+ sync_writer_->Close();
- state_ = kClosed;
+ if (user_input_monitor_)
+ user_input_monitor_->DisableKeyPressMonitoring();
- if (user_input_monitor_)
- user_input_monitor_->DisableKeyPressMonitoring();
- }
+ state_ = CLOSED;
}
void AudioInputController::DoReportError() {
- DCHECK(message_loop_->BelongsToCurrentThread());
- handler_->OnError(this);
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ if (handler_)
+ handler_->OnError(this, STREAM_ERROR);
}
void AudioInputController::DoSetVolume(double volume) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_GE(volume, 0);
DCHECK_LE(volume, 1.0);
- if (state_ != kCreated && state_ != kRecording)
+ if (state_ != CREATED && state_ != RECORDING)
return;
// Only ask for the maximum volume at first call and use cached value
@@ -293,25 +340,32 @@ void AudioInputController::DoSetVolume(double volume) {
}
void AudioInputController::DoSetAutomaticGainControl(bool enabled) {
- DCHECK(message_loop_->BelongsToCurrentThread());
- DCHECK_NE(state_, kRecording);
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ DCHECK_NE(state_, RECORDING);
// Ensure that the AGC state only can be modified before streaming starts.
- if (state_ != kCreated || state_ == kRecording)
+ if (state_ != CREATED)
return;
stream_->SetAutomaticGainControl(enabled);
}
+void AudioInputController::FirstCheckForNoData() {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ UMA_HISTOGRAM_BOOLEAN("Media.AudioInputControllerCaptureStartupSuccess",
+ GetDataIsActive());
+ DoCheckForNoData();
+}
+
void AudioInputController::DoCheckForNoData() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
if (!GetDataIsActive()) {
// The data-is-active marker will be false only if it has been more than
// one second since a data packet was recorded. This can happen if a
// capture device has been removed or disabled.
- handler_->OnError(this);
- return;
+ if (handler_)
+ handler_->OnError(this, NO_DATA_ERROR);
}
// Mark data as non-active. The flag will be re-enabled in OnData() each
@@ -328,13 +382,16 @@ void AudioInputController::DoCheckForNoData() {
}
void AudioInputController::OnData(AudioInputStream* stream,
- const uint8* data,
- uint32 size,
+ const AudioBus* source,
uint32 hardware_delay_bytes,
double volume) {
+ // Mark data as active to ensure that the periodic calls to
+ // DoCheckForNoData() do not report an error to the event handler.
+ SetDataIsActive(true);
+
{
base::AutoLock auto_lock(lock_);
- if (state_ != kRecording)
+ if (state_ != RECORDING)
return;
}
@@ -346,36 +403,92 @@ void AudioInputController::OnData(AudioInputStream* stream,
DVLOG_IF(6, key_pressed) << "Detected keypress.";
}
- // Mark data as active to ensure that the periodic calls to
- // DoCheckForNoData() does not report an error to the event handler.
- SetDataIsActive(true);
-
- // Use SyncSocket if we are in a low-latency mode.
- if (LowLatencyMode()) {
- sync_writer_->Write(data, size, volume, key_pressed);
+ // Use SharedMemory and SyncSocket if the client has created a SyncWriter.
+ // Used by all low-latency clients except WebSpeech.
+ if (SharedMemoryAndSyncSocketMode()) {
+ sync_writer_->Write(source, volume, key_pressed);
sync_writer_->UpdateRecordedBytes(hardware_delay_bytes);
+
+#if defined(AUDIO_POWER_MONITORING)
+ // Only do power-level measurements if an AudioPowerMonitor object has
+ // been created. Done in DoCreate() but not DoCreateForStream(), hence
+ // logging will mainly be done for WebRTC and WebSpeech clients.
+ if (!audio_level_)
+ return;
+
+ // Perform periodic audio (power) level measurements.
+ if ((base::TimeTicks::Now() - last_audio_level_log_time_).InSeconds() >
+ kPowerMonitorLogIntervalSeconds) {
+ // Wrap data into an AudioBus to match AudioPowerMonitor::Scan.
+ // TODO(henrika): remove this section when capture side uses AudioBus.
+ // See http://crbug.com/375155 for details.
+ audio_level_->Scan(*source, source->frames());
+
+ // Get current average power level and add it to the log.
+ // Possible range is given by [-inf, 0] dBFS.
+ std::pair<float, bool> result = audio_level_->ReadCurrentPowerAndClip();
+
+ // Use event handler on the audio thread to relay a message to the ARIH
+ // in content which does the actual logging on the IO thread.
+ task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(
+ &AudioInputController::DoLogAudioLevel, this, result.first));
+
+ last_audio_level_log_time_ = base::TimeTicks::Now();
+
+ // Reset the average power level (since we don't log continuously).
+ audio_level_->Reset();
+ }
+#endif
return;
}
- handler_->OnData(this, data, size);
+ // TODO(henrika): Investigate if we can avoid the extra copy here.
+ // (see http://crbug.com/249316 for details). AFAIK, this scope is only
+ // active for WebSpeech clients.
+ scoped_ptr<AudioBus> audio_data =
+ AudioBus::Create(source->channels(), source->frames());
+ source->CopyTo(audio_data.get());
+
+ // Ownership of the audio buffer stays with the posted task until it runs,
+ // at which point it is handed over to DoOnData().
+ task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(
+ &AudioInputController::DoOnData, this, base::Passed(&audio_data)));
+}
+
+void AudioInputController::DoOnData(scoped_ptr<AudioBus> data) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ if (handler_)
+ handler_->OnData(this, data.get());
}
-void AudioInputController::OnClose(AudioInputStream* stream) {
- DVLOG(1) << "AudioInputController::OnClose()";
- // TODO(satish): Sometimes the device driver closes the input stream without
- // us asking for it (may be if the device was unplugged?). Check how to handle
- // such cases here.
+void AudioInputController::DoLogAudioLevel(float level_dbfs) {
+#if defined(AUDIO_POWER_MONITORING)
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ if (!handler_)
+ return;
+
+ std::string log_string = base::StringPrintf(
+ "AIC::OnData: average audio level=%.2f dBFS", level_dbfs);
+ static const float kSilenceThresholdDBFS = -72.24719896f;
+ if (level_dbfs < kSilenceThresholdDBFS)
+ log_string += " <=> no audio input!";
+
+ handler_->OnLog(this, log_string);
+#endif
}
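As a sanity check on the constant above: dBFS is 20*log10(amplitude) with full
scale at 0 dBFS, so -72.247 dBFS corresponds to an amplitude of roughly 1/4096
of full scale. A minimal helper illustrating the conversion (not part of the
patch):

  #include <cmath>

  // Converts a linear amplitude in (0, 1] to dBFS; 1.0 maps to 0 dBFS.
  double AmplitudeToDbfs(double amplitude) {
    return 20.0 * std::log10(amplitude);
  }
  // AmplitudeToDbfs(1.0 / 4096.0) is about -72.247, i.e. kSilenceThresholdDBFS.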
void AudioInputController::OnError(AudioInputStream* stream) {
// Handle error on the audio-manager thread.
- message_loop_->PostTask(FROM_HERE, base::Bind(
+ task_runner_->PostTask(FROM_HERE, base::Bind(
&AudioInputController::DoReportError, this));
}
-void AudioInputController::DoStopCloseAndClearStream(
- base::WaitableEvent* done) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+void AudioInputController::DoStopCloseAndClearStream() {
+ DCHECK(task_runner_->BelongsToCurrentThread());
// Allow calling unconditionally and bail if we don't have a stream to close.
if (stream_ != NULL) {
@@ -384,9 +497,8 @@ void AudioInputController::DoStopCloseAndClearStream(
stream_ = NULL;
}
- // Should be last in the method, do not touch "this" from here on.
- if (done != NULL)
- done->Signal();
+ // The event handler should not be touched after the stream has been closed.
+ handler_ = NULL;
}
void AudioInputController::SetDataIsActive(bool enabled) {
diff --git a/chromium/media/audio/audio_input_controller.h b/chromium/media/audio/audio_input_controller.h
index 6b40459ded6..f2771c7e9c2 100644
--- a/chromium/media/audio/audio_input_controller.h
+++ b/chromium/media/audio/audio_input_controller.h
@@ -16,6 +16,9 @@
#include "base/timer/timer.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_manager_base.h"
+#include "media/audio/audio_parameters.h"
+#include "media/audio/audio_power_monitor.h"
+#include "media/base/audio_bus.h"
// An AudioInputController controls an AudioInputStream and records data
// from this input stream. The two main methods are Record() and Close() and
@@ -72,21 +75,52 @@
//
namespace media {
+// Only do power monitoring for non-mobile platforms to save resources.
+#if !defined(OS_ANDROID) && !defined(OS_IOS)
+#define AUDIO_POWER_MONITORING
+#endif
+
class UserInputMonitor;
class MEDIA_EXPORT AudioInputController
: public base::RefCountedThreadSafe<AudioInputController>,
public AudioInputStream::AudioInputCallback {
public:
+
+ // Error codes to make native logging more clear. These error codes are added
+ // to generic error strings to provide a higher degree of detail.
+ // Changing these values can lead to problems when matching native debug
+ // logs with the actual cause of error.
+ enum ErrorCode {
+ // An unspecified error occurred.
+ UNKNOWN_ERROR = 0,
+
+ // Failed to create an audio input stream.
+ STREAM_CREATE_ERROR, // = 1
+
+ // Failed to open an audio input stream.
+ STREAM_OPEN_ERROR, // = 2
+
+ // Native input stream reports an error. Exact reason differs between
+ // platforms.
+ STREAM_ERROR, // = 3
+
+ // This can happen if a capture device has been removed or disabled.
+ NO_DATA_ERROR, // = 4
+ };
+
// An event handler that receives events from the AudioInputController. The
// following methods are all called on the audio thread.
class MEDIA_EXPORT EventHandler {
public:
virtual void OnCreated(AudioInputController* controller) = 0;
virtual void OnRecording(AudioInputController* controller) = 0;
- virtual void OnError(AudioInputController* controller) = 0;
- virtual void OnData(AudioInputController* controller, const uint8* data,
- uint32 size) = 0;
+ virtual void OnError(AudioInputController* controller,
+ ErrorCode error_code) = 0;
+ virtual void OnData(AudioInputController* controller,
+ const AudioBus* data) = 0;
+ virtual void OnLog(AudioInputController* controller,
+ const std::string& message) = 0;
protected:
virtual ~EventHandler() {}
@@ -102,12 +136,10 @@ class MEDIA_EXPORT AudioInputController
// soundcard which has been recorded.
virtual void UpdateRecordedBytes(uint32 bytes) = 0;
- // Write certain amount of data from |data|. This method returns
- // number of written bytes.
- virtual uint32 Write(const void* data,
- uint32 size,
- double volume,
- bool key_pressed) = 0;
+ // Write certain amount of data from |data|.
+ virtual void Write(const AudioBus* data,
+ double volume,
+ bool key_pressed) = 0;
// Close this synchronous writer.
virtual void Close() = 0;
@@ -165,7 +197,7 @@ class MEDIA_EXPORT AudioInputController
// OnCreated() call from that same thread. |user_input_monitor| is used for
// typing detection and can be NULL.
static scoped_refptr<AudioInputController> CreateForStream(
- const scoped_refptr<base::MessageLoopProxy>& message_loop,
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
EventHandler* event_handler,
AudioInputStream* stream,
// External synchronous writer for audio controller.
@@ -196,23 +228,22 @@ class MEDIA_EXPORT AudioInputController
// AudioInputCallback implementation. Threading details depends on the
// device-specific implementation.
- virtual void OnData(AudioInputStream* stream, const uint8* src, uint32 size,
- uint32 hardware_delay_bytes, double volume) OVERRIDE;
- virtual void OnClose(AudioInputStream* stream) OVERRIDE;
+ virtual void OnData(AudioInputStream* stream,
+ const AudioBus* source,
+ uint32 hardware_delay_bytes,
+ double volume) OVERRIDE;
virtual void OnError(AudioInputStream* stream) OVERRIDE;
- bool LowLatencyMode() const { return sync_writer_ != NULL; }
+ bool SharedMemoryAndSyncSocketMode() const { return sync_writer_ != NULL; }
protected:
friend class base::RefCountedThreadSafe<AudioInputController>;
// Internal state of the source.
enum State {
- kEmpty,
- kCreated,
- kRecording,
- kClosed,
- kError
+ CREATED,
+ RECORDING,
+ CLOSED
};
AudioInputController(EventHandler* handler,
@@ -230,23 +261,28 @@ class MEDIA_EXPORT AudioInputController
void DoReportError();
void DoSetVolume(double volume);
void DoSetAutomaticGainControl(bool enabled);
+ void DoOnData(scoped_ptr<AudioBus> data);
+ void DoLogAudioLevel(float level_dbfs);
+
+ // Checks whether we receive recorded data after a stream has been started,
+ // and logs the result to UMA.
+ void FirstCheckForNoData();
// Method which ensures that OnError() is triggered when data recording
// times out. Called on the audio thread.
void DoCheckForNoData();
// Helper method that stops, closes, and NULL:s |*stream_|.
- // Signals event when done if the event is not NULL.
- void DoStopCloseAndClearStream(base::WaitableEvent* done);
+ void DoStopCloseAndClearStream();
void SetDataIsActive(bool enabled);
bool GetDataIsActive();
- // Gives access to the message loop of the creating thread.
- scoped_refptr<base::MessageLoopProxy> creator_loop_;
+ // Gives access to the task runner of the creating thread.
+ scoped_refptr<base::SingleThreadTaskRunner> creator_task_runner_;
- // The message loop of audio-manager thread that this object runs on.
- scoped_refptr<base::MessageLoopProxy> message_loop_;
+ // The task runner of audio-manager thread that this object runs on.
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
// Contains the AudioInputController::EventHandler which receives state
// notifications from this class.
@@ -256,8 +292,8 @@ class MEDIA_EXPORT AudioInputController
AudioInputStream* stream_;
// |no_data_timer_| is used to call OnError() when we stop receiving
- // OnData() calls without an OnClose() call. This can occur
- // when an audio input device is unplugged whilst recording on Windows.
+ // OnData() calls. This can occur when an audio input device is unplugged
+ // whilst recording on Windows.
// See http://crbug.com/79936 for details.
// This member is only touched by the audio thread.
scoped_ptr<base::Timer> no_data_timer_;
@@ -284,6 +320,15 @@ class MEDIA_EXPORT AudioInputController
UserInputMonitor* user_input_monitor_;
+#if defined(AUDIO_POWER_MONITORING)
+ // Scans audio samples from OnData() as input to compute audio levels.
+ scoped_ptr<AudioPowerMonitor> audio_level_;
+
+ // We need these to be able to feed data to the AudioPowerMonitor.
+ media::AudioParameters audio_params_;
+ base::TimeTicks last_audio_level_log_time_;
+#endif
+
size_t prev_key_down_count_;
DISALLOW_COPY_AND_ASSIGN(AudioInputController);
diff --git a/chromium/media/audio/audio_input_controller_unittest.cc b/chromium/media/audio/audio_input_controller_unittest.cc
index a7bb600aaf4..e71232d5730 100644
--- a/chromium/media/audio/audio_input_controller_unittest.cc
+++ b/chromium/media/audio/audio_input_controller_unittest.cc
@@ -51,9 +51,13 @@ class MockAudioInputControllerEventHandler
MOCK_METHOD1(OnCreated, void(AudioInputController* controller));
MOCK_METHOD1(OnRecording, void(AudioInputController* controller));
- MOCK_METHOD1(OnError, void(AudioInputController* controller));
- MOCK_METHOD3(OnData, void(AudioInputController* controller,
- const uint8* data, uint32 size));
+ MOCK_METHOD2(OnError, void(AudioInputController* controller,
+ AudioInputController::ErrorCode error_code));
+ MOCK_METHOD2(OnData,
+ void(AudioInputController* controller, const AudioBus* data));
+ MOCK_METHOD2(OnLog,
+ void(AudioInputController* controller,
+ const std::string& message));
private:
DISALLOW_COPY_AND_ASSIGN(MockAudioInputControllerEventHandler);
@@ -113,10 +117,10 @@ TEST_F(AudioInputControllerTest, RecordAndClose) {
.Times(Exactly(1));
// OnData() shall be called ten times.
- EXPECT_CALL(event_handler, OnData(NotNull(), NotNull(), _))
+ EXPECT_CALL(event_handler, OnData(NotNull(), NotNull()))
.Times(AtLeast(10))
- .WillRepeatedly(CheckCountAndPostQuitTask(&count, 10,
- message_loop_.message_loop_proxy()));
+ .WillRepeatedly(CheckCountAndPostQuitTask(
+ &count, 10, message_loop_.message_loop_proxy()));
scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
AudioParameters params(AudioParameters::AUDIO_FAKE, kChannelLayout,
@@ -142,9 +146,11 @@ TEST_F(AudioInputControllerTest, RecordAndClose) {
}
// Test that the AudioInputController reports an error when the input stream
-// stops without an OnClose() callback. This can happen when the underlying
-// audio layer stops feeding data as a result of a removed microphone device.
-TEST_F(AudioInputControllerTest, RecordAndError) {
+// stops. This can happen when the underlying audio layer stops feeding data as
+// a result of a removed microphone device.
+// Disabled due to crbug.com/357569 and crbug.com/357501.
+// TODO(henrika): Remove the test when the timer workaround has been removed.
+TEST_F(AudioInputControllerTest, DISABLED_RecordAndError) {
MockAudioInputControllerEventHandler event_handler;
int count = 0;
@@ -157,14 +163,15 @@ TEST_F(AudioInputControllerTest, RecordAndError) {
.Times(Exactly(1));
// OnData() shall be called ten times.
- EXPECT_CALL(event_handler, OnData(NotNull(), NotNull(), _))
+ EXPECT_CALL(event_handler, OnData(NotNull(), NotNull()))
.Times(AtLeast(10))
- .WillRepeatedly(CheckCountAndPostQuitTask(&count, 10,
- message_loop_.message_loop_proxy()));
+ .WillRepeatedly(CheckCountAndPostQuitTask(
+ &count, 10, message_loop_.message_loop_proxy()));
// OnError() will be called after the data stream stops while the
// controller is in a recording state.
- EXPECT_CALL(event_handler, OnError(NotNull()))
+ EXPECT_CALL(event_handler, OnError(NotNull(),
+ AudioInputController::NO_DATA_ERROR))
.Times(Exactly(1))
.WillOnce(QuitMessageLoop(&message_loop_));
diff --git a/chromium/media/audio/audio_input_device.cc b/chromium/media/audio/audio_input_device.cc
index d1a6ab89f9f..bf140cbad4e 100644
--- a/chromium/media/audio/audio_input_device.cc
+++ b/chromium/media/audio/audio_input_device.cc
@@ -6,7 +6,7 @@
#include "base/basictypes.h"
#include "base/bind.h"
-#include "base/message_loop/message_loop.h"
+#include "base/memory/scoped_vector.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
#include "media/audio/audio_manager_base.h"
@@ -40,15 +40,16 @@ class AudioInputDevice::AudioThreadCallback
private:
int current_segment_id_;
+ ScopedVector<media::AudioBus> audio_buses_;
CaptureCallback* capture_callback_;
- scoped_ptr<AudioBus> audio_bus_;
+
DISALLOW_COPY_AND_ASSIGN(AudioThreadCallback);
};
AudioInputDevice::AudioInputDevice(
scoped_ptr<AudioInputIPC> ipc,
- const scoped_refptr<base::MessageLoopProxy>& io_loop)
- : ScopedLoopObserver(io_loop),
+ const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner)
+ : ScopedTaskRunnerObserver(io_task_runner),
callback_(NULL),
ipc_(ipc.Pass()),
state_(IDLE),
@@ -78,7 +79,7 @@ void AudioInputDevice::Initialize(const AudioParameters& params,
void AudioInputDevice::Start() {
DCHECK(callback_) << "Initialize hasn't been called";
DVLOG(1) << "Start()";
- message_loop()->PostTask(FROM_HERE,
+ task_runner()->PostTask(FROM_HERE,
base::Bind(&AudioInputDevice::StartUpOnIOThread, this));
}
@@ -91,7 +92,7 @@ void AudioInputDevice::Stop() {
stopping_hack_ = true;
}
- message_loop()->PostTask(FROM_HERE,
+ task_runner()->PostTask(FROM_HERE,
base::Bind(&AudioInputDevice::ShutDownOnIOThread, this));
}
@@ -101,13 +102,13 @@ void AudioInputDevice::SetVolume(double volume) {
return;
}
- message_loop()->PostTask(FROM_HERE,
+ task_runner()->PostTask(FROM_HERE,
base::Bind(&AudioInputDevice::SetVolumeOnIOThread, this, volume));
}
void AudioInputDevice::SetAutomaticGainControl(bool enabled) {
DVLOG(1) << "SetAutomaticGainControl(enabled=" << enabled << ")";
- message_loop()->PostTask(FROM_HERE,
+ task_runner()->PostTask(FROM_HERE,
base::Bind(&AudioInputDevice::SetAutomaticGainControlOnIOThread,
this, enabled));
}
@@ -117,7 +118,7 @@ void AudioInputDevice::OnStreamCreated(
base::SyncSocket::Handle socket_handle,
int length,
int total_segments) {
- DCHECK(message_loop()->BelongsToCurrentThread());
+ DCHECK(task_runner()->BelongsToCurrentThread());
#if defined(OS_WIN)
DCHECK(handle);
DCHECK(socket_handle);
@@ -153,7 +154,7 @@ void AudioInputDevice::OnVolume(double volume) {
void AudioInputDevice::OnStateChanged(
AudioInputIPCDelegate::State state) {
- DCHECK(message_loop()->BelongsToCurrentThread());
+ DCHECK(task_runner()->BelongsToCurrentThread());
// Do nothing if the stream has been closed.
if (state_ < CREATING_STREAM)
@@ -186,7 +187,7 @@ void AudioInputDevice::OnStateChanged(
}
void AudioInputDevice::OnIPCClosed() {
- DCHECK(message_loop()->BelongsToCurrentThread());
+ DCHECK(task_runner()->BelongsToCurrentThread());
state_ = IPC_CLOSED;
ipc_.reset();
}
@@ -198,7 +199,7 @@ AudioInputDevice::~AudioInputDevice() {
}
void AudioInputDevice::StartUpOnIOThread() {
- DCHECK(message_loop()->BelongsToCurrentThread());
+ DCHECK(task_runner()->BelongsToCurrentThread());
// Make sure we don't call Start() more than once.
if (state_ != IDLE)
@@ -215,7 +216,7 @@ void AudioInputDevice::StartUpOnIOThread() {
}
void AudioInputDevice::ShutDownOnIOThread() {
- DCHECK(message_loop()->BelongsToCurrentThread());
+ DCHECK(task_runner()->BelongsToCurrentThread());
// Close the stream, if we haven't already.
if (state_ >= CREATING_STREAM) {
@@ -240,13 +241,13 @@ void AudioInputDevice::ShutDownOnIOThread() {
}
void AudioInputDevice::SetVolumeOnIOThread(double volume) {
- DCHECK(message_loop()->BelongsToCurrentThread());
+ DCHECK(task_runner()->BelongsToCurrentThread());
if (state_ >= CREATING_STREAM)
ipc_->SetVolume(volume);
}
void AudioInputDevice::SetAutomaticGainControlOnIOThread(bool enabled) {
- DCHECK(message_loop()->BelongsToCurrentThread());
+ DCHECK(task_runner()->BelongsToCurrentThread());
if (state_ >= CREATING_STREAM) {
DLOG(WARNING) << "The AGC state can not be modified after starting.";
@@ -274,7 +275,6 @@ AudioInputDevice::AudioThreadCallback::AudioThreadCallback(
total_segments),
current_segment_id_(0),
capture_callback_(capture_callback) {
- audio_bus_ = AudioBus::Create(audio_parameters_);
}
AudioInputDevice::AudioThreadCallback::~AudioThreadCallback() {
@@ -282,6 +282,17 @@ AudioInputDevice::AudioThreadCallback::~AudioThreadCallback() {
void AudioInputDevice::AudioThreadCallback::MapSharedMemory() {
shared_memory_.Map(memory_length_);
+
+ // Create vector of audio buses by wrapping existing blocks of memory.
+ uint8* ptr = static_cast<uint8*>(shared_memory_.memory());
+ for (int i = 0; i < total_segments_; ++i) {
+ media::AudioInputBuffer* buffer =
+ reinterpret_cast<media::AudioInputBuffer*>(ptr);
+ scoped_ptr<media::AudioBus> audio_bus =
+ media::AudioBus::WrapMemory(audio_parameters_, buffer->audio);
+ audio_buses_.push_back(audio_bus.release());
+ ptr += segment_length_;
+ }
}
void AudioInputDevice::AudioThreadCallback::Process(int pending_data) {
@@ -298,21 +309,17 @@ void AudioInputDevice::AudioThreadCallback::Process(int pending_data) {
double volume = buffer->params.volume;
bool key_pressed = buffer->params.key_pressed;
- int audio_delay_milliseconds = pending_data / bytes_per_ms_;
- int16* memory = reinterpret_cast<int16*>(&buffer->audio[0]);
- const int bytes_per_sample = sizeof(memory[0]);
-
- if (++current_segment_id_ >= total_segments_)
- current_segment_id_ = 0;
-
- // Deinterleave each channel and convert to 32-bit floating-point
- // with nominal range -1.0 -> +1.0.
- audio_bus_->FromInterleaved(memory, audio_bus_->frames(), bytes_per_sample);
+ // Use pre-allocated audio bus wrapping existing block of shared memory.
+ media::AudioBus* audio_bus = audio_buses_[current_segment_id_];
// Deliver captured data to the client in floating point format
// and update the audio-delay measurement.
+ int audio_delay_milliseconds = pending_data / bytes_per_ms_;
capture_callback_->Capture(
- audio_bus_.get(), audio_delay_milliseconds, volume, key_pressed);
+ audio_bus, audio_delay_milliseconds, volume, key_pressed);
+
+ if (++current_segment_id_ >= total_segments_)
+ current_segment_id_ = 0;
}
} // namespace media
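The MapSharedMemory()/Process() pair above assumes a simple ring layout: the
shared region is divided into |total_segments_| equally sized segments, each
starting with a media::AudioInputBuffer whose trailing audio data is what the
pre-built AudioBus wraps. A hedged sketch of the addressing (illustrative, not
part of the patch):

  #include <stddef.h>
  #include <stdint.h>

  // Returns a pointer to the start of segment |segment_id| inside the mapped
  // region; the real code reinterprets it as a media::AudioInputBuffer.
  uint8_t* SegmentStart(uint8_t* shm_base,
                        size_t segment_length,
                        int segment_id) {
    return shm_base + static_cast<size_t>(segment_id) * segment_length;
  }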
diff --git a/chromium/media/audio/audio_input_device.h b/chromium/media/audio/audio_input_device.h
index bb7d0ff4f71..0c390028d27 100644
--- a/chromium/media/audio/audio_input_device.h
+++ b/chromium/media/audio/audio_input_device.h
@@ -62,7 +62,7 @@
#include "media/audio/audio_device_thread.h"
#include "media/audio/audio_input_ipc.h"
#include "media/audio/audio_parameters.h"
-#include "media/audio/scoped_loop_observer.h"
+#include "media/audio/scoped_task_runner_observer.h"
#include "media/base/audio_capturer_source.h"
#include "media/base/media_export.h"
@@ -77,11 +77,12 @@ namespace media {
class MEDIA_EXPORT AudioInputDevice
: NON_EXPORTED_BASE(public AudioCapturerSource),
NON_EXPORTED_BASE(public AudioInputIPCDelegate),
- NON_EXPORTED_BASE(public ScopedLoopObserver) {
+ NON_EXPORTED_BASE(public ScopedTaskRunnerObserver) {
public:
// NOTE: Clients must call Initialize() before using.
- AudioInputDevice(scoped_ptr<AudioInputIPC> ipc,
- const scoped_refptr<base::MessageLoopProxy>& io_loop);
+ AudioInputDevice(
+ scoped_ptr<AudioInputIPC> ipc,
+ const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner);
// AudioCapturerSource implementation.
virtual void Initialize(const AudioParameters& params,
diff --git a/chromium/media/audio/audio_input_ipc.h b/chromium/media/audio/audio_input_ipc.h
index 0e6f2c34c4c..ce7b034d6a5 100644
--- a/chromium/media/audio/audio_input_ipc.h
+++ b/chromium/media/audio/audio_input_ipc.h
@@ -21,7 +21,8 @@ class MEDIA_EXPORT AudioInputIPCDelegate {
enum State {
kRecording,
kStopped,
- kError
+ kError,
+ kStateLast = kError
};
// Called when an AudioInputController has been created.
diff --git a/chromium/media/audio/audio_input_unittest.cc b/chromium/media/audio/audio_input_unittest.cc
index 838cab3867a..0bae9db7c4e 100644
--- a/chromium/media/audio/audio_input_unittest.cc
+++ b/chromium/media/audio/audio_input_unittest.cc
@@ -6,6 +6,8 @@
#include "base/environment.h"
#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/synchronization/waitable_event.h"
#include "base/threading/platform_thread.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_manager_base.h"
@@ -13,32 +15,20 @@
namespace media {
-static const int kSamplingRate = 8000;
-static const int kSamplesPerPacket = kSamplingRate / 20;
-
// This class lets us find out whether the callbacks are occurring as
// expected and whether any error has been reported.
class TestInputCallback : public AudioInputStream::AudioInputCallback {
public:
- explicit TestInputCallback(int max_data_bytes)
+ explicit TestInputCallback()
: callback_count_(0),
- had_error_(0),
- max_data_bytes_(max_data_bytes) {
+ had_error_(0) {
}
virtual void OnData(AudioInputStream* stream,
- const uint8* data,
- uint32 size,
+ const AudioBus* source,
uint32 hardware_delay_bytes,
double volume) OVERRIDE {
++callback_count_;
- // Read the first byte to make sure memory is good.
- if (size) {
- ASSERT_LE(static_cast<int>(size), max_data_bytes_);
- int value = data[0];
- EXPECT_GE(value, 0);
- }
}
- virtual void OnClose(AudioInputStream* stream) OVERRIDE {}
virtual void OnError(AudioInputStream* stream) OVERRIDE {
++had_error_;
}
@@ -54,68 +44,144 @@ class TestInputCallback : public AudioInputStream::AudioInputCallback {
private:
int callback_count_;
int had_error_;
- int max_data_bytes_;
};
-static bool CanRunAudioTests(AudioManager* audio_man) {
- bool has_input = audio_man->HasAudioInputDevices();
+class AudioInputTest : public testing::Test {
+ public:
+ AudioInputTest() :
+ message_loop_(base::MessageLoop::TYPE_UI),
+ audio_manager_(AudioManager::CreateForTesting()),
+ audio_input_stream_(NULL) {
+ // Wait for the AudioManager to finish any initialization on the audio loop.
+ base::RunLoop().RunUntilIdle();
+ }
- if (!has_input)
- LOG(WARNING) << "No input devices detected";
+ virtual ~AudioInputTest() {
+ base::RunLoop().RunUntilIdle();
+ }
- return has_input;
-}
+ protected:
+ AudioManager* audio_manager() { return audio_manager_.get(); }
-static AudioInputStream* CreateTestAudioInputStream(AudioManager* audio_man) {
- AudioInputStream* ais = audio_man->MakeAudioInputStream(
- AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
- kSamplingRate, 16, kSamplesPerPacket),
- AudioManagerBase::kDefaultDeviceId);
- EXPECT_TRUE(NULL != ais);
- return ais;
-}
+ bool CanRunAudioTests() {
+ bool has_input = audio_manager()->HasAudioInputDevices();
+ LOG_IF(WARNING, !has_input) << "No input devices detected";
+ return has_input;
+ }
-// Test that AudioInputStream rejects out of range parameters.
-TEST(AudioInputTest, SanityOnMakeParams) {
- scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
- if (!CanRunAudioTests(audio_man.get()))
- return;
+ void MakeAudioInputStreamOnAudioThread() {
+ RunOnAudioThread(
+ base::Bind(&AudioInputTest::MakeAudioInputStream,
+ base::Unretained(this)));
+ }
- AudioParameters::Format fmt = AudioParameters::AUDIO_PCM_LINEAR;
- EXPECT_TRUE(NULL == audio_man->MakeAudioInputStream(
- AudioParameters(fmt, CHANNEL_LAYOUT_7_1, 8000, 16,
- kSamplesPerPacket), AudioManagerBase::kDefaultDeviceId));
- EXPECT_TRUE(NULL == audio_man->MakeAudioInputStream(
- AudioParameters(fmt, CHANNEL_LAYOUT_MONO, 1024 * 1024, 16,
- kSamplesPerPacket), AudioManagerBase::kDefaultDeviceId));
- EXPECT_TRUE(NULL == audio_man->MakeAudioInputStream(
- AudioParameters(fmt, CHANNEL_LAYOUT_STEREO, 8000, 80,
- kSamplesPerPacket), AudioManagerBase::kDefaultDeviceId));
- EXPECT_TRUE(NULL == audio_man->MakeAudioInputStream(
- AudioParameters(fmt, CHANNEL_LAYOUT_STEREO, 8000, 80,
- 1000 * kSamplesPerPacket),
- AudioManagerBase::kDefaultDeviceId));
- EXPECT_TRUE(NULL == audio_man->MakeAudioInputStream(
- AudioParameters(fmt, CHANNEL_LAYOUT_UNSUPPORTED, 8000, 16,
- kSamplesPerPacket), AudioManagerBase::kDefaultDeviceId));
- EXPECT_TRUE(NULL == audio_man->MakeAudioInputStream(
- AudioParameters(fmt, CHANNEL_LAYOUT_STEREO, -8000, 16,
- kSamplesPerPacket), AudioManagerBase::kDefaultDeviceId));
- EXPECT_TRUE(NULL == audio_man->MakeAudioInputStream(
- AudioParameters(fmt, CHANNEL_LAYOUT_STEREO, 8000, -16,
- kSamplesPerPacket), AudioManagerBase::kDefaultDeviceId));
- EXPECT_TRUE(NULL == audio_man->MakeAudioInputStream(
- AudioParameters(fmt, CHANNEL_LAYOUT_STEREO, 8000, 16, -1024),
- AudioManagerBase::kDefaultDeviceId));
-}
+ void CloseAudioInputStreamOnAudioThread() {
+ RunOnAudioThread(
+ base::Bind(&AudioInputStream::Close,
+ base::Unretained(audio_input_stream_)));
+ audio_input_stream_ = NULL;
+ }
+
+ void OpenAndCloseAudioInputStreamOnAudioThread() {
+ RunOnAudioThread(
+ base::Bind(&AudioInputTest::OpenAndClose,
+ base::Unretained(this)));
+ }
+
+ void OpenStopAndCloseAudioInputStreamOnAudioThread() {
+ RunOnAudioThread(
+ base::Bind(&AudioInputTest::OpenStopAndClose,
+ base::Unretained(this)));
+ }
+
+ void OpenAndStartAudioInputStreamOnAudioThread(
+ AudioInputStream::AudioInputCallback* sink) {
+ RunOnAudioThread(
+ base::Bind(&AudioInputTest::OpenAndStart,
+ base::Unretained(this),
+ sink));
+ }
+
+ void StopAndCloseAudioInputStreamOnAudioThread() {
+ RunOnAudioThread(
+ base::Bind(&AudioInputTest::StopAndClose,
+ base::Unretained(this)));
+ }
+
+ void MakeAudioInputStream() {
+ DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ AudioParameters params = audio_manager()->GetInputStreamParameters(
+ AudioManagerBase::kDefaultDeviceId);
+ audio_input_stream_ = audio_manager()->MakeAudioInputStream(params,
+ AudioManagerBase::kDefaultDeviceId);
+ EXPECT_TRUE(audio_input_stream_);
+ }
+
+ void OpenAndClose() {
+ DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ EXPECT_TRUE(audio_input_stream_->Open());
+ audio_input_stream_->Close();
+ audio_input_stream_ = NULL;
+ }
+
+ void OpenAndStart(AudioInputStream::AudioInputCallback* sink) {
+ DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ EXPECT_TRUE(audio_input_stream_->Open());
+ audio_input_stream_->Start(sink);
+ }
+
+ void OpenStopAndClose() {
+ DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ EXPECT_TRUE(audio_input_stream_->Open());
+ audio_input_stream_->Stop();
+ audio_input_stream_->Close();
+ audio_input_stream_ = NULL;
+ }
+
+ void StopAndClose() {
+ DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ audio_input_stream_->Stop();
+ audio_input_stream_->Close();
+ audio_input_stream_ = NULL;
+ }
+
+ // Synchronously runs the provided callback/closure on the audio thread.
+ void RunOnAudioThread(const base::Closure& closure) {
+ if (!audio_manager()->GetTaskRunner()->BelongsToCurrentThread()) {
+ base::WaitableEvent event(false, false);
+ audio_manager()->GetTaskRunner()->PostTask(
+ FROM_HERE,
+ base::Bind(&AudioInputTest::RunOnAudioThreadImpl,
+ base::Unretained(this),
+ closure,
+ &event));
+ event.Wait();
+ } else {
+ closure.Run();
+ }
+ }
+
+ void RunOnAudioThreadImpl(const base::Closure& closure,
+ base::WaitableEvent* event) {
+ DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ closure.Run();
+ event->Signal();
+ }
+
+ base::MessageLoop message_loop_;
+ scoped_ptr<AudioManager> audio_manager_;
+ AudioInputStream* audio_input_stream_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AudioInputTest);
+};
// Test create and close of an AudioInputStream without recording audio.
-TEST(AudioInputTest, CreateAndClose) {
- scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
- if (!CanRunAudioTests(audio_man.get()))
+TEST_F(AudioInputTest, CreateAndClose) {
+ if (!CanRunAudioTests())
return;
- AudioInputStream* ais = CreateTestAudioInputStream(audio_man.get());
- ais->Close();
+ MakeAudioInputStreamOnAudioThread();
+ CloseAudioInputStreamOnAudioThread();
}
#if defined(OS_LINUX) && !defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
@@ -125,13 +191,11 @@ TEST(AudioInputTest, CreateAndClose) {
#define MAYBE_OpenAndClose OpenAndClose
#endif
// Test create, open and close of an AudioInputStream without recording audio.
-TEST(AudioInputTest, MAYBE_OpenAndClose) {
- scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
- if (!CanRunAudioTests(audio_man.get()))
+TEST_F(AudioInputTest, MAYBE_OpenAndClose) {
+ if (!CanRunAudioTests())
return;
- AudioInputStream* ais = CreateTestAudioInputStream(audio_man.get());
- EXPECT_TRUE(ais->Open());
- ais->Close();
+ MakeAudioInputStreamOnAudioThread();
+ OpenAndCloseAudioInputStreamOnAudioThread();
}
#if defined(OS_LINUX) && !defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
@@ -141,14 +205,11 @@ TEST(AudioInputTest, MAYBE_OpenAndClose) {
#define MAYBE_OpenStopAndClose OpenStopAndClose
#endif
// Test create, open, stop and close of an AudioInputStream without recording.
-TEST(AudioInputTest, MAYBE_OpenStopAndClose) {
- scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
- if (!CanRunAudioTests(audio_man.get()))
+TEST_F(AudioInputTest, MAYBE_OpenStopAndClose) {
+ if (!CanRunAudioTests())
return;
- AudioInputStream* ais = CreateTestAudioInputStream(audio_man.get());
- EXPECT_TRUE(ais->Open());
- ais->Stop();
- ais->Close();
+ MakeAudioInputStreamOnAudioThread();
+ OpenStopAndCloseAudioInputStreamOnAudioThread();
}
#if defined(OS_LINUX) && !defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
@@ -158,28 +219,25 @@ TEST(AudioInputTest, MAYBE_OpenStopAndClose) {
#define MAYBE_Record Record
#endif
// Test a normal recording sequence using an AudioInputStream.
-TEST(AudioInputTest, MAYBE_Record) {
- scoped_ptr<AudioManager> audio_man(AudioManager::CreateForTesting());
- if (!CanRunAudioTests(audio_man.get()))
+// Very simple test which captures audio for half a second and verifies
+// that recording starts.
+TEST_F(AudioInputTest, MAYBE_Record) {
+ if (!CanRunAudioTests())
return;
- base::MessageLoop message_loop(base::MessageLoop::TYPE_DEFAULT);
- AudioInputStream* ais = CreateTestAudioInputStream(audio_man.get());
- EXPECT_TRUE(ais->Open());
-
- TestInputCallback test_callback(kSamplesPerPacket * 4);
- ais->Start(&test_callback);
- // Verify at least 500ms worth of audio was recorded, after giving sufficient
- // extra time.
- message_loop.PostDelayedTask(
+ MakeAudioInputStreamOnAudioThread();
+
+ TestInputCallback test_callback;
+ OpenAndStartAudioInputStreamOnAudioThread(&test_callback);
+
+ message_loop_.PostDelayedTask(
FROM_HERE,
base::MessageLoop::QuitClosure(),
- base::TimeDelta::FromMilliseconds(690));
- message_loop.Run();
- EXPECT_GE(test_callback.callback_count(), 1);
+ base::TimeDelta::FromMilliseconds(500));
+ message_loop_.Run();
+ EXPECT_GE(test_callback.callback_count(), 2);
EXPECT_FALSE(test_callback.had_error());
- ais->Stop();
- ais->Close();
+ StopAndCloseAudioInputStreamOnAudioThread();
}
} // namespace media
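The fixture above repeats a pattern used throughout this patch: post a closure to the AudioManager's task runner and block on a WaitableEvent until it has run. Factored into a free function it is just this (a condensed sketch of the same idiom, names made up):

#include "base/bind.h"
#include "base/single_thread_task_runner.h"
#include "base/synchronization/waitable_event.h"

// Runs |closure| and signals |event| so the posting thread can resume.
static void RunThenSignal(const base::Closure& closure,
                          base::WaitableEvent* event) {
  closure.Run();
  event->Signal();
}

// Sketch: synchronously run |closure| on |task_runner| from any thread.
void PostAndWait(
    const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
    const base::Closure& closure) {
  if (task_runner->BelongsToCurrentThread()) {
    closure.Run();
    return;
  }
  base::WaitableEvent event(false /* manual_reset */,
                            false /* initially_signaled */);
  task_runner->PostTask(FROM_HERE,
                        base::Bind(&RunThenSignal, closure, &event));
  event.Wait();
}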
diff --git a/chromium/media/audio/audio_io.h b/chromium/media/audio/audio_io.h
index 473af0d512f..1e1eba420b1 100644
--- a/chromium/media/audio/audio_io.h
+++ b/chromium/media/audio/audio_io.h
@@ -63,10 +63,6 @@ class MEDIA_EXPORT AudioOutputStream {
virtual int OnMoreData(AudioBus* dest,
AudioBuffersState buffers_state) = 0;
- virtual int OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state) = 0;
-
// There was an error while playing a buffer. Audio source cannot be
// destroyed yet. No direct action needed by the AudioStream, but it is
// a good place to stop accumulating sound data since it is likely that
@@ -113,13 +109,18 @@ class MEDIA_EXPORT AudioInputStream {
// Called by the audio recorder when a full packet of audio data is
// available. This is called from a special audio thread and the
// implementation should return as soon as possible.
- virtual void OnData(AudioInputStream* stream, const uint8* src,
- uint32 size, uint32 hardware_delay_bytes,
- double volume) = 0;
-
- // The stream is done with this callback, the last call received by this
- // audio sink.
- virtual void OnClose(AudioInputStream* stream) = 0;
+ // TODO(henrika): should be pure virtual when old OnData() is phased out.
+ virtual void OnData(AudioInputStream* stream,
+ const AudioBus* source,
+ uint32 hardware_delay_bytes,
+ double volume) {};
+
+ // TODO(henrika): don't use; to be removed.
+ virtual void OnData(AudioInputStream* stream,
+ const uint8* src,
+ uint32 size,
+ uint32 hardware_delay_bytes,
+ double volume) {};
// There was an error while recording audio. The audio sink cannot be
// destroyed yet. No direct action needed by the AudioInputStream, but it
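The interface change above swaps the interleaved uint8*/size OnData() for an AudioBus overload and drops OnClose(); while both overloads carry empty default bodies during the transition, an implementation can override just the new one. A minimal sketch (the counter members are illustrative):

#include "base/basictypes.h"
#include "base/compiler_specific.h"
#include "media/audio/audio_io.h"

// Sketch: a callback that only implements the AudioBus-based OnData().
class CountingInputCallback
    : public media::AudioInputStream::AudioInputCallback {
 public:
  CountingInputCallback() : packets_(0), errors_(0) {}

  virtual void OnData(media::AudioInputStream* stream,
                      const media::AudioBus* source,
                      uint32 hardware_delay_bytes,
                      double volume) OVERRIDE {
    // |source| is already deinterleaved float data; no int16 conversion or
    // byte-count bookkeeping is needed any more.
    ++packets_;
  }

  virtual void OnError(media::AudioInputStream* stream) OVERRIDE { ++errors_; }

  int packets() const { return packets_; }
  int errors() const { return errors_; }

 private:
  int packets_;
  int errors_;
};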
diff --git a/chromium/media/audio/audio_logging.h b/chromium/media/audio/audio_logging.h
index 1d8366bad75..913b8ec4433 100644
--- a/chromium/media/audio/audio_logging.h
+++ b/chromium/media/audio/audio_logging.h
@@ -20,13 +20,11 @@ class AudioLog {
virtual ~AudioLog() {}
// Called when an audio component is created. |params| are the parameters of
- // the created stream. |input_device_id| and |output_device_id| are the
- // respective device ids for input and output. Either one or both may be
- // specified.
+ // the created stream. |device_id| is the id of the audio device opened by
+ // the created stream.
virtual void OnCreated(int component_id,
const media::AudioParameters& params,
- const std::string& input_device_id,
- const std::string& output_device_id) = 0;
+ const std::string& device_id) = 0;
// Called when an audio component is started, generally this is synonymous
// with "playing."
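OnCreated() now receives a single |device_id| instead of separate input and output ids. A sketch of an AudioLog implementation adapted to the new signature; only OnCreated() is shown, the class name and logging are illustrative, and the remaining AudioLog callbacks are omitted:

#include "base/logging.h"
#include "media/audio/audio_logging.h"

// Sketch: log the parameters and the (single) device id of a new component.
class VerboseAudioLog : public media::AudioLog {
 public:
  virtual void OnCreated(int component_id,
                         const media::AudioParameters& params,
                         const std::string& device_id) OVERRIDE {
    VLOG(1) << "component " << component_id << " created on '" << device_id
            << "': " << params.sample_rate() << " Hz, "
            << params.channels() << " channels, "
            << params.frames_per_buffer() << " frames per buffer";
  }
  // Remaining AudioLog callbacks omitted from this sketch.
};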
diff --git a/chromium/media/audio/audio_low_latency_input_output_unittest.cc b/chromium/media/audio/audio_low_latency_input_output_unittest.cc
index c0cfa6937cf..eefd3800aab 100644
--- a/chromium/media/audio/audio_low_latency_input_output_unittest.cc
+++ b/chromium/media/audio/audio_low_latency_input_output_unittest.cc
@@ -6,7 +6,6 @@
#include "base/environment.h"
#include "base/file_util.h"
#include "base/memory/scoped_ptr.h"
-#include "base/message_loop/message_loop.h"
#include "base/path_service.h"
#include "base/synchronization/lock.h"
#include "base/test/test_timeouts.h"
@@ -88,7 +87,7 @@ class MockAudioManager : public AudioManagerAnyPlatform {
MockAudioManager() : AudioManagerAnyPlatform(&fake_audio_log_factory_) {}
virtual ~MockAudioManager() {}
- virtual scoped_refptr<base::MessageLoopProxy> GetMessageLoop() OVERRIDE {
+ virtual scoped_refptr<base::SingleThreadTaskRunner> GetTaskRunner() OVERRIDE {
return base::MessageLoop::current()->message_loop_proxy();
}
@@ -185,7 +184,7 @@ class FullDuplexAudioSinkSource
// AudioInputStream::AudioInputCallback.
virtual void OnData(AudioInputStream* stream,
- const uint8* src, uint32 size,
+ const AudioBus* src,
uint32 hardware_delay_bytes,
double volume) OVERRIDE {
base::AutoLock lock(lock_);
@@ -204,17 +203,17 @@ class FullDuplexAudioSinkSource
++input_elements_to_write_;
}
+ // TODO(henrika): fix this and use AudioFifo instead.
// Store the captured audio packet in a seekable media buffer.
- if (!buffer_->Append(src, size)) {
- // An attempt to write outside the buffer limits has been made.
- // Double the buffer capacity to ensure that we have a buffer large
- // enough to handle the current sample test scenario.
- buffer_->set_forward_capacity(2 * buffer_->forward_capacity());
- buffer_->Clear();
- }
+ // if (!buffer_->Append(src, size)) {
+ // An attempt to write outside the buffer limits has been made.
+ // Double the buffer capacity to ensure that we have a buffer large
+ // enough to handle the current sample test scenario.
+ // buffer_->set_forward_capacity(2 * buffer_->forward_capacity());
+ // buffer_->Clear();
+ // }
}
- virtual void OnClose(AudioInputStream* stream) OVERRIDE {}
virtual void OnError(AudioInputStream* stream) OVERRIDE {}
// AudioOutputStream::AudioSourceCallback.
@@ -256,13 +255,6 @@ class FullDuplexAudioSinkSource
return 0;
}
- virtual int OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state) OVERRIDE {
- NOTREACHED();
- return 0;
- }
-
virtual void OnError(AudioOutputStream* stream) OVERRIDE {}
protected:
@@ -314,8 +306,7 @@ class AudioOutputStreamTraits {
static StreamType* CreateStream(AudioManager* audio_manager,
const AudioParameters& params) {
- return audio_manager->MakeAudioOutputStream(params, std::string(),
- std::string());
+ return audio_manager->MakeAudioOutputStream(params, std::string());
}
};
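The commented-out Append() block above carries a TODO to move to media::AudioFifo now that OnData() delivers an AudioBus. A sketch of that direction, with the capacity and locking choices made up for illustration:

#include "base/synchronization/lock.h"
#include "media/base/audio_bus.h"
#include "media/base/audio_fifo.h"

// Sketch: buffer captured buses in an AudioFifo and drain them on the
// render side, zero-filling on underrun.
class CaptureRenderFifo {
 public:
  CaptureRenderFifo(int channels, int capacity_frames)
      : fifo_(channels, capacity_frames) {}

  void PushCaptured(const media::AudioBus* src) {
    base::AutoLock lock(lock_);
    if (fifo_.frames() + src->frames() <= fifo_.max_frames())
      fifo_.Push(src);  // Deep-copies |src| into the FIFO.
  }

  void PopForRender(media::AudioBus* dest) {
    base::AutoLock lock(lock_);
    if (fifo_.frames() >= dest->frames())
      fifo_.Consume(dest, 0, dest->frames());
    else
      dest->Zero();
  }

 private:
  base::Lock lock_;
  media::AudioFifo fifo_;
};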
diff --git a/chromium/media/audio/audio_manager.h b/chromium/media/audio/audio_manager.h
index 0ca468ed4dd..915308ef77d 100644
--- a/chromium/media/audio/audio_manager.h
+++ b/chromium/media/audio/audio_manager.h
@@ -15,8 +15,7 @@
#include "media/audio/audio_parameters.h"
namespace base {
-class MessageLoop;
-class MessageLoopProxy;
+class SingleThreadTaskRunner;
}
namespace media {
@@ -68,14 +67,14 @@ class MEDIA_EXPORT AudioManager {
// recording.
//
// Not threadsafe; in production this should only be called from the
- // Audio IO thread (see GetMessageLoop).
+ // Audio worker thread (see GetWorkerTaskRunner()).
virtual void GetAudioInputDeviceNames(AudioDeviceNames* device_names) = 0;
// Appends a list of available output devices to |device_names|,
// which must initially be empty.
//
// Not threadsafe; in production this should only be called from the
- // Audio IO thread (see GetMessageLoop).
+ // Audio worker thread (see GetWorkerTaskRunner()).
virtual void GetAudioOutputDeviceNames(AudioDeviceNames* device_names) = 0;
// Factory for all the supported stream formats. |params| defines parameters
@@ -89,11 +88,6 @@ class MEDIA_EXPORT AudioManager {
// To create a stream for the default output device, pass an empty string
// for |device_id|, otherwise the specified audio device will be opened.
//
- // The |input_device_id| is used for low-latency unified streams
- // (input+output) only and then only if the audio parameters specify a >0
- // input channel count. In other cases this id is ignored and should be
- // empty.
- //
// Returns NULL if the combination of the parameters is not supported, or if
// we have reached some other platform specific limit.
//
@@ -106,8 +100,7 @@ class MEDIA_EXPORT AudioManager {
// Do not free the returned AudioOutputStream. It is owned by AudioManager.
virtual AudioOutputStream* MakeAudioOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) = 0;
+ const std::string& device_id) = 0;
// Creates new audio output proxy. A proxy implements
// AudioOutputStream interface, but unlike regular output stream
@@ -115,8 +108,7 @@ class MEDIA_EXPORT AudioManager {
// sound is actually playing.
virtual AudioOutputStream* MakeAudioOutputStreamProxy(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) = 0;
+ const std::string& device_id) = 0;
// Factory to create audio recording streams.
// |channels| can be 1 or 2.
@@ -133,13 +125,13 @@ class MEDIA_EXPORT AudioManager {
virtual AudioInputStream* MakeAudioInputStream(
const AudioParameters& params, const std::string& device_id) = 0;
- // Returns message loop used for audio IO.
- virtual scoped_refptr<base::MessageLoopProxy> GetMessageLoop() = 0;
+ // Returns the task runner used for audio IO.
+ virtual scoped_refptr<base::SingleThreadTaskRunner> GetTaskRunner() = 0;
- // Heavyweight tasks should use GetWorkerLoop() instead of GetMessageLoop().
- // On most platforms they are the same, but some share the UI loop with the
- // audio IO loop.
- virtual scoped_refptr<base::MessageLoopProxy> GetWorkerLoop() = 0;
+ // Heavyweight tasks should use GetWorkerTaskRunner() instead of
+ // GetTaskRunner(). On most platforms they are the same, but some share the
+ // UI loop with the audio IO loop.
+ virtual scoped_refptr<base::SingleThreadTaskRunner> GetWorkerTaskRunner() = 0;
// Allows clients to listen for device state changes; e.g. preferred sample
// rate or channel layout changes. The typical response to receiving this
@@ -175,7 +167,8 @@ class MEDIA_EXPORT AudioManager {
// If the hardware has only an input device (e.g. a webcam), the return value
// will be empty (which the caller can then interpret to be the default output
// device). Implementations that don't yet support this feature must return
- // an empty string.
+ // an empty string. Must be called on the audio worker thread (see
+ // GetWorkerTaskRunner()).
virtual std::string GetAssociatedOutputDeviceID(
const std::string& input_device_id) = 0;
@@ -184,11 +177,6 @@ class MEDIA_EXPORT AudioManager {
virtual scoped_ptr<AudioLog> CreateAudioLog(
AudioLogFactory::AudioComponent component) = 0;
- // Called when a component has detected a OS level audio wedge. Shuts down
- // all active audio streams and then restarts them transparently. See
- // http://crbug.com/160920
- virtual void FixWedgedAudio() = 0;
-
protected:
AudioManager();
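The rename from GetMessageLoop()/GetWorkerLoop() to GetTaskRunner()/GetWorkerTaskRunner() does not change the threading rules spelled out above: blocking work such as device enumeration belongs on the worker task runner. A sketch of routing it there; the logging and the AudioDeviceName field names are assumptions:

#include "base/bind.h"
#include "base/logging.h"
#include "media/audio/audio_manager.h"

// Sketch: enumerate input devices on the worker thread. |audio_manager| must
// outlive the posted task, hence base::Unretained().
void EnumerateAndLogInputDevices(media::AudioManager* audio_manager) {
  media::AudioDeviceNames names;
  audio_manager->GetAudioInputDeviceNames(&names);
  for (media::AudioDeviceNames::const_iterator it = names.begin();
       it != names.end(); ++it) {
    VLOG(1) << "input device: " << it->device_name
            << " (" << it->unique_id << ")";
  }
}

void PostInputDeviceEnumeration(media::AudioManager* audio_manager) {
  audio_manager->GetWorkerTaskRunner()->PostTask(
      FROM_HERE,
      base::Bind(&EnumerateAndLogInputDevices,
                 base::Unretained(audio_manager)));
}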
diff --git a/chromium/media/audio/audio_manager_base.cc b/chromium/media/audio/audio_manager_base.cc
index f7b590ae724..3b36b106fa6 100644
--- a/chromium/media/audio/audio_manager_base.cc
+++ b/chromium/media/audio/audio_manager_base.cc
@@ -7,7 +7,6 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
-#include "base/message_loop/message_loop_proxy.h"
#include "base/strings/string_number_conversions.h"
#include "build/build_config.h"
#include "media/audio/audio_output_dispatcher_impl.h"
@@ -29,7 +28,7 @@ static const int kDefaultMaxOutputStreams = 16;
// for all platforms.
static const int kDefaultMaxInputStreams = 16;
-static const int kMaxInputChannels = 2;
+static const int kMaxInputChannels = 3;
const char AudioManagerBase::kDefaultDeviceName[] = "Default";
const char AudioManagerBase::kDefaultDeviceId[] = "default";
@@ -38,17 +37,14 @@ const char AudioManagerBase::kLoopbackInputDeviceId[] = "loopback";
struct AudioManagerBase::DispatcherParams {
DispatcherParams(const AudioParameters& input,
const AudioParameters& output,
- const std::string& output_device_id,
- const std::string& input_device_id)
+ const std::string& output_device_id)
: input_params(input),
output_params(output),
- input_device_id(input_device_id),
output_device_id(output_device_id) {}
~DispatcherParams() {}
const AudioParameters input_params;
const AudioParameters output_params;
- const std::string input_device_id;
const std::string output_device_id;
scoped_refptr<AudioOutputDispatcher> dispatcher;
@@ -64,13 +60,11 @@ class AudioManagerBase::CompareByParams {
// We will reuse the existing dispatcher when:
// 1) Unified IO is not used, input_params and output_params of the
// existing dispatcher are the same as the requested dispatcher.
- // 2) Unified IO is used, input_params, output_params and input_device_id
- // of the existing dispatcher are the same as the request dispatcher.
+ // 2) Unified IO is used, input_params and output_params of the existing
+ // dispatcher are the same as the requested dispatcher.
return (dispatcher_->input_params == dispatcher_in->input_params &&
dispatcher_->output_params == dispatcher_in->output_params &&
- dispatcher_->output_device_id == dispatcher_in->output_device_id &&
- (!dispatcher_->input_params.input_channels() ||
- dispatcher_->input_device_id == dispatcher_in->input_device_id));
+ dispatcher_->output_device_id == dispatcher_in->output_device_id);
}
private:
@@ -95,17 +89,14 @@ AudioManagerBase::AudioManagerBase(AudioLogFactory* audio_log_factory)
// case is sadly the browser UI thread. Failure to execute calls on the right
// thread leads to crashes and odd behavior. See http://crbug.com/158170.
// TODO(dalecurtis): We should require the message loop to be passed in.
- const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
- if (!cmd_line->HasSwitch(switches::kDisableMainThreadAudio) &&
- base::MessageLoopProxy::current().get() &&
- base::MessageLoop::current()->IsType(base::MessageLoop::TYPE_UI)) {
- message_loop_ = base::MessageLoopProxy::current();
+ if (base::MessageLoopForUI::IsCurrent()) {
+ task_runner_ = base::MessageLoopProxy::current();
return;
}
#endif
CHECK(audio_thread_.Start());
- message_loop_ = audio_thread_.message_loop_proxy();
+ task_runner_ = audio_thread_.message_loop_proxy();
}
AudioManagerBase::~AudioManagerBase() {
@@ -125,11 +116,12 @@ base::string16 AudioManagerBase::GetAudioInputDeviceModel() {
return base::string16();
}
-scoped_refptr<base::MessageLoopProxy> AudioManagerBase::GetMessageLoop() {
- return message_loop_;
+scoped_refptr<base::SingleThreadTaskRunner> AudioManagerBase::GetTaskRunner() {
+ return task_runner_;
}
-scoped_refptr<base::MessageLoopProxy> AudioManagerBase::GetWorkerLoop() {
+scoped_refptr<base::SingleThreadTaskRunner>
+AudioManagerBase::GetWorkerTaskRunner() {
// Lazily start the worker thread.
if (!audio_thread_.IsRunning())
CHECK(audio_thread_.Start());
@@ -139,11 +131,10 @@ scoped_refptr<base::MessageLoopProxy> AudioManagerBase::GetWorkerLoop() {
AudioOutputStream* AudioManagerBase::MakeAudioOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) {
+ const std::string& device_id) {
// TODO(miu): Fix ~50 call points across several unit test modules to call
// this method on the audio thread, then uncomment the following:
- // DCHECK(message_loop_->BelongsToCurrentThread());
+ // DCHECK(task_runner_->BelongsToCurrentThread());
if (!params.IsValid()) {
DLOG(ERROR) << "Audio parameters are invalid";
@@ -170,7 +161,7 @@ AudioOutputStream* AudioManagerBase::MakeAudioOutputStream(
stream = MakeLinearOutputStream(params);
break;
case AudioParameters::AUDIO_PCM_LOW_LATENCY:
- stream = MakeLowLatencyOutputStream(params, device_id, input_device_id);
+ stream = MakeLowLatencyOutputStream(params, device_id);
break;
case AudioParameters::AUDIO_FAKE:
stream = FakeAudioOutputStream::MakeFakeStream(this, params);
@@ -192,7 +183,7 @@ AudioInputStream* AudioManagerBase::MakeAudioInputStream(
const std::string& device_id) {
// TODO(miu): Fix ~20 call points across several unit test modules to call
// this method on the audio thread, then uncomment the following:
- // DCHECK(message_loop_->BelongsToCurrentThread());
+ // DCHECK(task_runner_->BelongsToCurrentThread());
if (!params.IsValid() || (params.channels() > kMaxInputChannels) ||
device_id.empty()) {
@@ -232,9 +223,8 @@ AudioInputStream* AudioManagerBase::MakeAudioInputStream(
AudioOutputStream* AudioManagerBase::MakeAudioOutputStreamProxy(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ const std::string& device_id) {
+ DCHECK(task_runner_->BelongsToCurrentThread());
// If the caller supplied an empty device id to select the default device,
// we fetch the actual device id of the default device so that the lookup
@@ -273,8 +263,7 @@ AudioOutputStream* AudioManagerBase::MakeAudioOutputStreamProxy(
}
DispatcherParams* dispatcher_params =
- new DispatcherParams(params, output_params, output_device_id,
- input_device_id);
+ new DispatcherParams(params, output_params, output_device_id);
AudioOutputDispatchers::iterator it =
std::find_if(output_dispatchers_.begin(), output_dispatchers_.end(),
@@ -289,12 +278,12 @@ AudioOutputStream* AudioManagerBase::MakeAudioOutputStreamProxy(
scoped_refptr<AudioOutputDispatcher> dispatcher;
if (output_params.format() != AudioParameters::AUDIO_FAKE) {
dispatcher = new AudioOutputResampler(this, params, output_params,
- output_device_id, input_device_id,
+ output_device_id,
kCloseDelay);
} else {
dispatcher = new AudioOutputDispatcherImpl(this, output_params,
output_device_id,
- input_device_id, kCloseDelay);
+ kCloseDelay);
}
dispatcher_params->dispatcher = dispatcher;
@@ -332,10 +321,10 @@ void AudioManagerBase::ReleaseInputStream(AudioInputStream* stream) {
void AudioManagerBase::Shutdown() {
// Only true when we're sharing the UI message loop with the browser. The UI
// loop is no longer running at this time and browser destruction is imminent.
- if (message_loop_->BelongsToCurrentThread()) {
+ if (task_runner_->BelongsToCurrentThread()) {
ShutdownOnAudioThread();
} else {
- message_loop_->PostTask(FROM_HERE, base::Bind(
+ task_runner_->PostTask(FROM_HERE, base::Bind(
&AudioManagerBase::ShutdownOnAudioThread, base::Unretained(this)));
}
@@ -344,39 +333,27 @@ void AudioManagerBase::Shutdown() {
}
void AudioManagerBase::ShutdownOnAudioThread() {
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- AudioOutputDispatchers::iterator it = output_dispatchers_.begin();
- for (; it != output_dispatchers_.end(); ++it) {
- scoped_refptr<AudioOutputDispatcher>& dispatcher = (*it)->dispatcher;
- dispatcher->Shutdown();
-
- // All AudioOutputProxies must have been freed before Shutdown is called.
- // If they still exist, things will go bad. They have direct pointers to
- // both physical audio stream objects that belong to the dispatcher as
- // well as the message loop of the audio thread that will soon go away.
- // So, better crash now than later.
- DCHECK(dispatcher->HasOneRef()) << "AudioOutputProxies are still alive";
- dispatcher = NULL;
+ DCHECK(task_runner_->BelongsToCurrentThread());
+ while (!output_dispatchers_.empty()) {
+ output_dispatchers_.back()->dispatcher->Shutdown();
+ output_dispatchers_.pop_back();
}
-
- output_dispatchers_.clear();
}
void AudioManagerBase::AddOutputDeviceChangeListener(
AudioDeviceListener* listener) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
output_listeners_.AddObserver(listener);
}
void AudioManagerBase::RemoveOutputDeviceChangeListener(
AudioDeviceListener* listener) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
output_listeners_.RemoveObserver(listener);
}
void AudioManagerBase::NotifyAllOutputDeviceChangeListeners() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DVLOG(1) << "Firing OnDeviceChange() notifications.";
FOR_EACH_OBSERVER(AudioDeviceListener, output_listeners_, OnDeviceChange());
}
@@ -400,7 +377,6 @@ AudioParameters AudioManagerBase::GetInputStreamParameters(
std::string AudioManagerBase::GetAssociatedOutputDeviceID(
const std::string& input_device_id) {
- NOTIMPLEMENTED();
return "";
}
@@ -424,32 +400,4 @@ scoped_ptr<AudioLog> AudioManagerBase::CreateAudioLog(
return audio_log_factory_->CreateAudioLog(component);
}
-void AudioManagerBase::FixWedgedAudio() {
- DCHECK(message_loop_->BelongsToCurrentThread());
-#if defined(OS_MACOSX)
- // Through trial and error, we've found that one way to restore audio after a
- // hang is to close all outstanding audio streams. Once all streams have been
- // closed, new streams appear to work correctly.
- //
- // In Chrome terms, this means we need to ask all AudioOutputDispatchers to
- // close all Open()'d streams. Once all streams across all dispatchers have
- // been closed, we ask for all previously Start()'d streams to be recreated
- // using the same AudioSourceCallback they had before.
- //
- // Since this operation takes place on the audio thread we can be sure that no
- // other state-changing stream operations will take place while the fix is in
- // progress.
- //
- // See http://crbug.com/160920 for additional details.
- for (AudioOutputDispatchers::iterator it = output_dispatchers_.begin();
- it != output_dispatchers_.end(); ++it) {
- (*it)->dispatcher->CloseStreamsForWedgeFix();
- }
- for (AudioOutputDispatchers::iterator it = output_dispatchers_.begin();
- it != output_dispatchers_.end(); ++it) {
- (*it)->dispatcher->RestartStreamsForWedgeFix();
- }
-#endif
-}
-
} // namespace media
diff --git a/chromium/media/audio/audio_manager_base.h b/chromium/media/audio/audio_manager_base.h
index 09b021a0d2b..bc13ee5a167 100644
--- a/chromium/media/audio/audio_manager_base.h
+++ b/chromium/media/audio/audio_manager_base.h
@@ -48,8 +48,9 @@ class MEDIA_EXPORT AudioManagerBase : public AudioManager {
virtual ~AudioManagerBase();
- virtual scoped_refptr<base::MessageLoopProxy> GetMessageLoop() OVERRIDE;
- virtual scoped_refptr<base::MessageLoopProxy> GetWorkerLoop() OVERRIDE;
+ virtual scoped_refptr<base::SingleThreadTaskRunner> GetTaskRunner() OVERRIDE;
+ virtual scoped_refptr<base::SingleThreadTaskRunner> GetWorkerTaskRunner()
+ OVERRIDE;
virtual base::string16 GetAudioInputDeviceModel() OVERRIDE;
@@ -63,16 +64,14 @@ class MEDIA_EXPORT AudioManagerBase : public AudioManager {
virtual AudioOutputStream* MakeAudioOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) OVERRIDE;
+ const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeAudioInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
virtual AudioOutputStream* MakeAudioOutputStreamProxy(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) OVERRIDE;
+ const std::string& device_id) OVERRIDE;
// Called internally by the audio stream when it has been closed.
virtual void ReleaseOutputStream(AudioOutputStream* stream);
@@ -84,11 +83,9 @@ class MEDIA_EXPORT AudioManagerBase : public AudioManager {
const AudioParameters& params) = 0;
// Creates the output stream for the |AUDIO_PCM_LOW_LATENCY| format.
- // |input_device_id| is used by unified IO to open the correct input device.
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) = 0;
+ const std::string& device_id) = 0;
// Creates the input stream for the |AUDIO_PCM_LINEAR| format. The legacy
// name is also from |AUDIO_PCM_LINEAR|.
@@ -99,7 +96,7 @@ class MEDIA_EXPORT AudioManagerBase : public AudioManager {
virtual AudioInputStream* MakeLowLatencyInputStream(
const AudioParameters& params, const std::string& device_id) = 0;
- // Listeners will be notified on the AudioManager::GetMessageLoop() loop.
+ // Listeners will be notified on the GetTaskRunner() task runner.
virtual void AddOutputDeviceChangeListener(
AudioDeviceListener* listener) OVERRIDE;
virtual void RemoveOutputDeviceChangeListener(
@@ -118,7 +115,9 @@ class MEDIA_EXPORT AudioManagerBase : public AudioManager {
virtual scoped_ptr<AudioLog> CreateAudioLog(
AudioLogFactory::AudioComponent component) OVERRIDE;
- virtual void FixWedgedAudio() OVERRIDE;
+ // Get number of input or output streams.
+ int input_stream_count() const { return num_input_streams_; }
+ int output_stream_count() const { return num_output_streams_; }
protected:
AudioManagerBase(AudioLogFactory* audio_log_factory);
@@ -155,10 +154,6 @@ class MEDIA_EXPORT AudioManagerBase : public AudioManager {
// Implementations that don't yet support this should return an empty string.
virtual std::string GetDefaultOutputDeviceID();
- // Get number of input or output streams.
- int input_stream_count() { return num_input_streams_; }
- int output_stream_count() { return num_output_streams_; }
-
private:
struct DispatcherParams;
typedef ScopedVector<DispatcherParams> AudioOutputDispatchers;
@@ -187,10 +182,10 @@ class MEDIA_EXPORT AudioManagerBase : public AudioManager {
// Thread used to interact with audio streams created by this audio manager.
base::Thread audio_thread_;
- // The message loop of the audio thread this object runs on. Used for internal
+ // The task runner of the audio thread this object runs on. Used for internal
// tasks which run on the audio thread even after Shutdown() has been started
- // and GetMessageLoop() starts returning NULL.
- scoped_refptr<base::MessageLoopProxy> message_loop_;
+ // and GetTaskRunner() starts returning NULL.
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
// Map of cached AudioOutputDispatcher instances. Must only be touched
// from the audio thread (no locking).
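Since input_stream_count()/output_stream_count() move from the protected section to the public one above, tests can observe the manager's stream bookkeeping directly. A sketch, assuming it runs on the audio thread and that the platform stream's Close() hands the stream back to the manager:

// Sketch: verify that a created input stream is accounted for and released.
void CheckInputStreamBookkeeping(media::AudioManagerBase* manager) {
  media::AudioParameters params = manager->GetInputStreamParameters(
      media::AudioManagerBase::kDefaultDeviceId);
  media::AudioInputStream* stream = manager->MakeAudioInputStream(
      params, media::AudioManagerBase::kDefaultDeviceId);
  ASSERT_TRUE(stream);
  EXPECT_EQ(1, manager->input_stream_count());
  stream->Close();  // Releases the stream back to the manager.
  EXPECT_EQ(0, manager->input_stream_count());
}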
diff --git a/chromium/media/audio/audio_manager_unittest.cc b/chromium/media/audio/audio_manager_unittest.cc
index 8c6cc10b423..902618ebd92 100644
--- a/chromium/media/audio/audio_manager_unittest.cc
+++ b/chromium/media/audio/audio_manager_unittest.cc
@@ -5,6 +5,7 @@
#include "base/environment.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
+#include "base/synchronization/waitable_event.h"
#include "media/audio/audio_manager.h"
#include "media/audio/audio_manager_base.h"
#include "media/audio/fake_audio_log_factory.h"
@@ -28,8 +29,7 @@ namespace media {
// Test fixture which allows us to override the default enumeration API on
// Windows.
-class AudioManagerTest
- : public ::testing::Test {
+class AudioManagerTest : public ::testing::Test {
protected:
AudioManagerTest()
: audio_manager_(AudioManager::CreateForTesting())
@@ -37,8 +37,16 @@ class AudioManagerTest
, com_init_(base::win::ScopedCOMInitializer::kMTA)
#endif
{
+ // Wait for audio thread initialization to complete. Otherwise the
+ // enumeration type may not have been set yet.
+ base::WaitableEvent event(false, false);
+ audio_manager_->GetTaskRunner()->PostTask(FROM_HERE, base::Bind(
+ &base::WaitableEvent::Signal, base::Unretained(&event)));
+ event.Wait();
}
+ AudioManager* audio_manager() { return audio_manager_.get(); };
+
#if defined(OS_WIN)
bool SetMMDeviceEnumeration() {
AudioManagerWin* amw = static_cast<AudioManagerWin*>(audio_manager_.get());
@@ -122,6 +130,29 @@ class AudioManagerTest
}
#endif
+ // Synchronously runs the provided callback/closure on the audio thread.
+ void RunOnAudioThread(const base::Closure& closure) {
+ if (!audio_manager()->GetTaskRunner()->BelongsToCurrentThread()) {
+ base::WaitableEvent event(false, false);
+ audio_manager_->GetTaskRunner()->PostTask(
+ FROM_HERE,
+ base::Bind(&AudioManagerTest::RunOnAudioThreadImpl,
+ base::Unretained(this),
+ closure,
+ &event));
+ event.Wait();
+ } else {
+ closure.Run();
+ }
+ }
+
+ void RunOnAudioThreadImpl(const base::Closure& closure,
+ base::WaitableEvent* event) {
+ DCHECK(audio_manager()->GetTaskRunner()->BelongsToCurrentThread());
+ closure.Run();
+ event->Signal();
+ }
+
FakeAudioLogFactory fake_audio_log_factory_;
scoped_ptr<AudioManager> audio_manager_;
@@ -137,7 +168,10 @@ TEST_F(AudioManagerTest, EnumerateInputDevices) {
return;
AudioDeviceNames device_names;
- audio_manager_->GetAudioInputDeviceNames(&device_names);
+ RunOnAudioThread(
+ base::Bind(&AudioManager::GetAudioInputDeviceNames,
+ base::Unretained(audio_manager()),
+ &device_names));
CheckDeviceNames(device_names);
}
@@ -147,7 +181,10 @@ TEST_F(AudioManagerTest, EnumerateOutputDevices) {
return;
AudioDeviceNames device_names;
- audio_manager_->GetAudioOutputDeviceNames(&device_names);
+ RunOnAudioThread(
+ base::Bind(&AudioManager::GetAudioOutputDeviceNames,
+ base::Unretained(audio_manager()),
+ &device_names));
CheckDeviceNames(device_names);
}
diff --git a/chromium/media/audio/audio_output_controller.cc b/chromium/media/audio/audio_output_controller.cc
index 92f9f25de53..232b77d727d 100644
--- a/chromium/media/audio/audio_output_controller.cc
+++ b/chromium/media/audio/audio_output_controller.cc
@@ -6,7 +6,6 @@
#include "base/bind.h"
#include "base/debug/trace_event.h"
-#include "base/message_loop/message_loop.h"
#include "base/metrics/histogram.h"
#include "base/task_runner_util.h"
#include "base/threading/platform_thread.h"
@@ -14,7 +13,6 @@
#include "build/build_config.h"
#include "media/base/scoped_histogram_timer.h"
-using base::Time;
using base::TimeDelta;
namespace media {
@@ -23,35 +21,24 @@ namespace media {
// Time constant for AudioPowerMonitor. See AudioPowerMonitor ctor comments for
// semantics. This value was arbitrarily chosen, but seems to work well.
static const int kPowerMeasurementTimeConstantMillis = 10;
-
-// Desired frequency of calls to EventHandler::OnPowerMeasured() for reporting
-// power levels in the audio signal.
-static const int kPowerMeasurementsPerSecond = 4;
#endif
-// Polling-related constants.
-const int AudioOutputController::kPollNumAttempts = 3;
-const int AudioOutputController::kPollPauseInMilliseconds = 3;
-
AudioOutputController::AudioOutputController(
AudioManager* audio_manager,
EventHandler* handler,
const AudioParameters& params,
const std::string& output_device_id,
- const std::string& input_device_id,
SyncReader* sync_reader)
: audio_manager_(audio_manager),
params_(params),
handler_(handler),
output_device_id_(output_device_id),
- input_device_id_(input_device_id),
stream_(NULL),
diverting_to_stream_(NULL),
volume_(1.0),
state_(kEmpty),
- num_allowed_io_(0),
sync_reader_(sync_reader),
- message_loop_(audio_manager->GetMessageLoop()),
+ message_loop_(audio_manager->GetTaskRunner()),
#if defined(AUDIO_POWER_MONITORING)
power_monitor_(
params.sample_rate(),
@@ -74,7 +61,6 @@ scoped_refptr<AudioOutputController> AudioOutputController::Create(
EventHandler* event_handler,
const AudioParameters& params,
const std::string& output_device_id,
- const std::string& input_device_id,
SyncReader* sync_reader) {
DCHECK(audio_manager);
DCHECK(sync_reader);
@@ -83,8 +69,7 @@ scoped_refptr<AudioOutputController> AudioOutputController::Create(
return NULL;
scoped_refptr<AudioOutputController> controller(new AudioOutputController(
- audio_manager, event_handler, params, output_device_id, input_device_id,
- sync_reader));
+ audio_manager, event_handler, params, output_device_id, sync_reader));
controller->message_loop_->PostTask(FROM_HERE, base::Bind(
&AudioOutputController::DoCreate, controller, false));
return controller;
@@ -143,8 +128,7 @@ void AudioOutputController::DoCreate(bool is_for_device_change) {
stream_ = diverting_to_stream_ ?
diverting_to_stream_ :
- audio_manager_->MakeAudioOutputStreamProxy(params_, output_device_id_,
- input_device_id_);
+ audio_manager_->MakeAudioOutputStreamProxy(params_, output_device_id_);
if (!stream_) {
state_ = kError;
handler_->OnError();
@@ -188,18 +172,6 @@ void AudioOutputController::DoPlay() {
state_ = kPlaying;
-#if defined(AUDIO_POWER_MONITORING)
- power_monitor_.Reset();
- power_poll_callback_.Reset(
- base::Bind(&AudioOutputController::ReportPowerMeasurementPeriodically,
- this));
- // Run the callback to send an initial notification that we're starting in
- // silence, and to schedule periodic callbacks.
- power_poll_callback_.callback().Run();
-#endif
-
- on_more_io_data_called_ = 0;
- AllowEntryToOnMoreIOData();
stream_->Start(this);
// For UMA tracking purposes, start the wedge detection timer. This allows us
@@ -221,28 +193,17 @@ void AudioOutputController::DoPlay() {
handler_->OnPlaying();
}
-#if defined(AUDIO_POWER_MONITORING)
-void AudioOutputController::ReportPowerMeasurementPeriodically() {
- DCHECK(message_loop_->BelongsToCurrentThread());
- const std::pair<float, bool>& reading =
- power_monitor_.ReadCurrentPowerAndClip();
- handler_->OnPowerMeasured(reading.first, reading.second);
- message_loop_->PostDelayedTask(
- FROM_HERE, power_poll_callback_.callback(),
- TimeDelta::FromSeconds(1) / kPowerMeasurementsPerSecond);
-}
-#endif
-
void AudioOutputController::StopStream() {
DCHECK(message_loop_->BelongsToCurrentThread());
if (state_ == kPlaying) {
wedge_timer_.reset();
stream_->Stop();
- DisallowEntryToOnMoreIOData();
#if defined(AUDIO_POWER_MONITORING)
- power_poll_callback_.Cancel();
+ // A stopped stream is silent, and power_monitor_.Scan() is no longer being
+ // called, so we must reset the power monitor.
+ power_monitor_.Reset();
#endif
state_ = kPaused;
@@ -264,11 +225,6 @@ void AudioOutputController::DoPause() {
// a better way to know when it should exit PPB_Audio_Shared::Run().
sync_reader_->UpdatePendingBytes(-1);
-#if defined(AUDIO_POWER_MONITORING)
- // Paused means silence follows.
- handler_->OnPowerMeasured(AudioPowerMonitor::zero_power(), false);
-#endif
-
handler_->OnPaused();
}
@@ -335,14 +291,7 @@ void AudioOutputController::DoReportError() {
int AudioOutputController::OnMoreData(AudioBus* dest,
AudioBuffersState buffers_state) {
- return OnMoreIOData(NULL, dest, buffers_state);
-}
-
-int AudioOutputController::OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state) {
- DisallowEntryToOnMoreIOData();
- TRACE_EVENT0("audio", "AudioOutputController::OnMoreIOData");
+ TRACE_EVENT0("audio", "AudioOutputController::OnMoreData");
// Indicate that we haven't wedged (at least not indefinitely, WedgeCheck()
// may have already fired if OnMoreIOData() took an abnormal amount of time).
@@ -351,7 +300,7 @@ int AudioOutputController::OnMoreIOData(AudioBus* source,
if (base::AtomicRefCountIsZero(&on_more_io_data_called_))
base::AtomicRefCountInc(&on_more_io_data_called_);
- sync_reader_->Read(source, dest);
+ sync_reader_->Read(dest);
const int frames = dest->frames();
sync_reader_->UpdatePendingBytes(
@@ -361,7 +310,6 @@ int AudioOutputController::OnMoreIOData(AudioBus* source,
power_monitor_.Scan(*dest, frames);
#endif
- AllowEntryToOnMoreIOData();
return frames;
}
@@ -463,14 +411,13 @@ void AudioOutputController::DoStopDiverting() {
DCHECK(!diverting_to_stream_);
}
-void AudioOutputController::AllowEntryToOnMoreIOData() {
- DCHECK(base::AtomicRefCountIsZero(&num_allowed_io_));
- base::AtomicRefCountInc(&num_allowed_io_);
-}
-
-void AudioOutputController::DisallowEntryToOnMoreIOData() {
- const bool is_zero = !base::AtomicRefCountDec(&num_allowed_io_);
- DCHECK(is_zero);
+std::pair<float, bool> AudioOutputController::ReadCurrentPowerAndClip() {
+#if defined(AUDIO_POWER_MONITORING)
+ return power_monitor_.ReadCurrentPowerAndClip();
+#else
+ NOTREACHED();
+ return std::make_pair(AudioPowerMonitor::zero_power(), false);
+#endif
}
void AudioOutputController::WedgeCheck() {
@@ -478,15 +425,8 @@ void AudioOutputController::WedgeCheck() {
// If we should be playing and we haven't, that's a wedge.
if (state_ == kPlaying) {
- const bool playback_success =
- base::AtomicRefCountIsOne(&on_more_io_data_called_);
-
- UMA_HISTOGRAM_BOOLEAN(
- "Media.AudioOutputControllerPlaybackStartupSuccess", playback_success);
-
- // Let the AudioManager try and fix it.
- if (!playback_success)
- audio_manager_->FixWedgedAudio();
+ UMA_HISTOGRAM_BOOLEAN("Media.AudioOutputControllerPlaybackStartupSuccess",
+ base::AtomicRefCountIsOne(&on_more_io_data_called_));
}
}
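With the OnPowerMeasured() push path removed above, interested callers now pull levels through ReadCurrentPowerAndClip(), which the header says may be called on any thread. A sketch of a caller-owned poller; the 250 ms period, the class and the logging are illustrative:

#include "base/logging.h"
#include "base/timer/timer.h"
#include "media/audio/audio_output_controller.h"

// Sketch: periodically sample power levels from the controller. The timer
// must be used on the thread that owns this object.
class PowerLevelPoller {
 public:
  explicit PowerLevelPoller(
      const scoped_refptr<media::AudioOutputController>& controller)
      : controller_(controller) {
    timer_.Start(FROM_HERE, base::TimeDelta::FromMilliseconds(250),
                 this, &PowerLevelPoller::Poll);
  }

 private:
  void Poll() {
    std::pair<float, bool> reading = controller_->ReadCurrentPowerAndClip();
    VLOG(1) << "power_dbfs=" << reading.first
            << " clipped=" << reading.second;
  }

  scoped_refptr<media::AudioOutputController> controller_;
  base::RepeatingTimer<PowerLevelPoller> timer_;
};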
diff --git a/chromium/media/audio/audio_output_controller.h b/chromium/media/audio/audio_output_controller.h
index d16ce9e79b6..0b02ee2bbb2 100644
--- a/chromium/media/audio/audio_output_controller.h
+++ b/chromium/media/audio/audio_output_controller.h
@@ -7,7 +7,6 @@
#include "base/atomic_ref_count.h"
#include "base/callback.h"
-#include "base/cancelable_callback.h"
#include "base/memory/ref_counted.h"
#include "base/timer/timer.h"
#include "media/audio/audio_io.h"
@@ -70,7 +69,6 @@ class MEDIA_EXPORT AudioOutputController
public:
virtual void OnCreated() = 0;
virtual void OnPlaying() = 0;
- virtual void OnPowerMeasured(float power_dbfs, bool clipped) = 0;
virtual void OnPaused() = 0;
virtual void OnError() = 0;
virtual void OnDeviceChange(int new_buffer_size, int new_sample_rate) = 0;
@@ -93,9 +91,8 @@ class MEDIA_EXPORT AudioOutputController
virtual void UpdatePendingBytes(uint32 bytes) = 0;
// Attempts to completely fill |dest|, zeroing |dest| if the request can not
- // be fulfilled (due to timeout). |source| may optionally be provided for
- // input data.
- virtual void Read(const AudioBus* source, AudioBus* dest) = 0;
+ // be fulfilled (due to timeout).
+ virtual void Read(AudioBus* dest) = 0;
// Close this synchronous reader.
virtual void Close() = 0;
@@ -107,13 +104,11 @@ class MEDIA_EXPORT AudioOutputController
// OnCreated() call from the same audio manager thread. |audio_manager| must
// outlive AudioOutputController.
// The |output_device_id| can be either empty (default device) or specify a
- // specific hardware device for audio output. The |input_device_id| is
- // used only for unified audio when opening up input and output at the same
- // time (controlled by |params.input_channel_count()|).
+ // specific hardware device for audio output.
static scoped_refptr<AudioOutputController> Create(
AudioManager* audio_manager, EventHandler* event_handler,
const AudioParameters& params, const std::string& output_device_id,
- const std::string& input_device_id, SyncReader* sync_reader);
+ SyncReader* sync_reader);
// Methods to control playback of the stream.
@@ -155,9 +150,6 @@ class MEDIA_EXPORT AudioOutputController
// AudioSourceCallback implementation.
virtual int OnMoreData(AudioBus* dest,
AudioBuffersState buffers_state) OVERRIDE;
- virtual int OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state) OVERRIDE;
virtual void OnError(AudioOutputStream* stream) OVERRIDE;
// AudioDeviceListener implementation. When called AudioOutputController will
@@ -171,6 +163,10 @@ class MEDIA_EXPORT AudioOutputController
virtual void StartDiverting(AudioOutputStream* to_stream) OVERRIDE;
virtual void StopDiverting() OVERRIDE;
+ // Accessor for AudioPowerMonitor::ReadCurrentPowerAndClip(). See comments in
+ // audio_power_monitor.h for usage. This may be called on any thread.
+ std::pair<float, bool> ReadCurrentPowerAndClip();
+
protected:
// Internal state of the source.
enum State {
@@ -186,14 +182,9 @@ class MEDIA_EXPORT AudioOutputController
virtual ~AudioOutputController();
private:
- // We are polling sync reader if data became available.
- static const int kPollNumAttempts;
- static const int kPollPauseInMilliseconds;
-
AudioOutputController(AudioManager* audio_manager, EventHandler* handler,
const AudioParameters& params,
const std::string& output_device_id,
- const std::string& input_device_id,
SyncReader* sync_reader);
// The following methods are executed on the audio manager thread.
@@ -208,22 +199,13 @@ class MEDIA_EXPORT AudioOutputController
void DoStartDiverting(AudioOutputStream* to_stream);
void DoStopDiverting();
- // Calls EventHandler::OnPowerMeasured() with the current power level and then
- // schedules itself to be called again later.
- void ReportPowerMeasurementPeriodically();
-
// Helper method that stops the physical stream.
void StopStream();
// Helper method that stops, closes, and NULLs |*stream_|.
void DoStopCloseAndClearStream();
- // Sanity-check that entry/exit to OnMoreIOData() by the hardware audio thread
- // happens only between AudioOutputStream::Start() and Stop().
- void AllowEntryToOnMoreIOData();
- void DisallowEntryToOnMoreIOData();
-
- // Checks if a stream was started successfully but never calls OnMoreIOData().
+ // Checks if a stream was started successfully but never calls OnMoreData().
void WedgeCheck();
AudioManager* const audio_manager_;
@@ -234,9 +216,6 @@ class MEDIA_EXPORT AudioOutputController
// default output device.
std::string output_device_id_;
- // Used by the unified IO to open the correct input device.
- const std::string input_device_id_;
-
AudioOutputStream* stream_;
// When non-NULL, audio is being diverted to this stream.
@@ -250,25 +229,15 @@ class MEDIA_EXPORT AudioOutputController
// is not required for reading on the audio manager thread.
State state_;
- // Binary semaphore, used to ensure that only one thread enters the
- // OnMoreIOData() method, and only when it is valid to do so. This is for
- // sanity-checking the behavior of platform implementations of
- // AudioOutputStream. In other words, multiple contention is not expected,
- // nor in the design here.
- base::AtomicRefCount num_allowed_io_;
-
// SyncReader is used only in low latency mode for synchronous reading.
SyncReader* const sync_reader_;
// The message loop of audio manager thread that this object runs on.
- const scoped_refptr<base::MessageLoopProxy> message_loop_;
+ const scoped_refptr<base::SingleThreadTaskRunner> message_loop_;
#if defined(AUDIO_POWER_MONITORING)
- // Scans audio samples from OnMoreIOData() as input to compute power levels.
+ // Scans audio samples from OnMoreData() as input to compute power levels.
AudioPowerMonitor power_monitor_;
-
- // Periodic callback to report power levels during playback.
- base::CancelableClosure power_poll_callback_;
#endif
// Flags when we've asked for a stream to start but it never did.
diff --git a/chromium/media/audio/audio_output_controller_unittest.cc b/chromium/media/audio/audio_output_controller_unittest.cc
index 457265ec970..125763c9bfe 100644
--- a/chromium/media/audio/audio_output_controller_unittest.cc
+++ b/chromium/media/audio/audio_output_controller_unittest.cc
@@ -39,7 +39,6 @@ class MockAudioOutputControllerEventHandler
MOCK_METHOD0(OnCreated, void());
MOCK_METHOD0(OnPlaying, void());
- MOCK_METHOD2(OnPowerMeasured, void(float power_dbfs, bool clipped));
MOCK_METHOD0(OnPaused, void());
MOCK_METHOD0(OnError, void());
MOCK_METHOD2(OnDeviceChange, void(int new_buffer_size, int new_sample_rate));
@@ -54,7 +53,7 @@ class MockAudioOutputControllerSyncReader
MockAudioOutputControllerSyncReader() {}
MOCK_METHOD1(UpdatePendingBytes, void(uint32 bytes));
- MOCK_METHOD2(Read, void(const AudioBus* source, AudioBus* dest));
+ MOCK_METHOD1(Read, void(AudioBus* dest));
MOCK_METHOD0(Close, void());
private:
@@ -84,10 +83,10 @@ ACTION_P(SignalEvent, event) {
static const float kBufferNonZeroData = 1.0f;
ACTION(PopulateBuffer) {
- arg1->Zero();
+ arg0->Zero();
// Note: To confirm the buffer will be populated in these tests, it's
// sufficient that only the first float in channel 0 is set to the value.
- arg1->channel(0)[0] = kBufferNonZeroData;
+ arg0->channel(0)[0] = kBufferNonZeroData;
}
class AudioOutputControllerTest : public testing::Test {
@@ -121,7 +120,7 @@ class AudioOutputControllerTest : public testing::Test {
controller_ = AudioOutputController::Create(
audio_manager_.get(), &mock_event_handler_, params_, std::string(),
- std::string(), &mock_sync_reader_);
+ &mock_sync_reader_);
if (controller_.get())
controller_->SetVolume(kTestVolume);
@@ -129,20 +128,15 @@ class AudioOutputControllerTest : public testing::Test {
}
void Play() {
- // Expect the event handler to receive one OnPlaying() call and one or more
- // OnPowerMeasured() calls.
+ // Expect the event handler to receive one OnPlaying() call.
EXPECT_CALL(mock_event_handler_, OnPlaying())
.WillOnce(SignalEvent(&play_event_));
-#if defined(AUDIO_POWER_MONITORING)
- EXPECT_CALL(mock_event_handler_, OnPowerMeasured(_, false))
- .Times(AtLeast(1));
-#endif
// During playback, the mock pretends to provide audio data rendered and
// sent from the render process.
EXPECT_CALL(mock_sync_reader_, UpdatePendingBytes(_))
.Times(AtLeast(1));
- EXPECT_CALL(mock_sync_reader_, Read(_, _))
+ EXPECT_CALL(mock_sync_reader_, Read(_))
.WillRepeatedly(DoAll(PopulateBuffer(),
SignalEvent(&read_event_)));
controller_->Play();
@@ -166,7 +160,7 @@ class AudioOutputControllerTest : public testing::Test {
// Simulate a device change event to AudioOutputController from the
// AudioManager.
- audio_manager_->GetMessageLoop()->PostTask(
+ audio_manager_->GetTaskRunner()->PostTask(
FROM_HERE,
base::Bind(&AudioOutputController::OnDeviceChange, controller_));
}
diff --git a/chromium/media/audio/audio_output_device.cc b/chromium/media/audio/audio_output_device.cc
index 1f9efc185bd..5cc602197eb 100644
--- a/chromium/media/audio/audio_output_device.cc
+++ b/chromium/media/audio/audio_output_device.cc
@@ -6,7 +6,6 @@
#include "base/basictypes.h"
#include "base/debug/trace_event.h"
-#include "base/message_loop/message_loop.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
#include "media/audio/audio_output_controller.h"
@@ -33,15 +32,14 @@ class AudioOutputDevice::AudioThreadCallback
private:
AudioRendererSink::RenderCallback* render_callback_;
- scoped_ptr<AudioBus> input_bus_;
scoped_ptr<AudioBus> output_bus_;
DISALLOW_COPY_AND_ASSIGN(AudioThreadCallback);
};
AudioOutputDevice::AudioOutputDevice(
scoped_ptr<AudioOutputIPC> ipc,
- const scoped_refptr<base::MessageLoopProxy>& io_loop)
- : ScopedLoopObserver(io_loop),
+ const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner)
+ : ScopedTaskRunnerObserver(io_task_runner),
callback_(NULL),
ipc_(ipc.Pass()),
state_(IDLE),
@@ -58,10 +56,10 @@ AudioOutputDevice::AudioOutputDevice(
COMPILE_ASSERT(PAUSED < PLAYING, invalid_enum_value_assignment_3);
}
-void AudioOutputDevice::InitializeUnifiedStream(const AudioParameters& params,
+void AudioOutputDevice::InitializeWithSessionId(const AudioParameters& params,
RenderCallback* callback,
int session_id) {
- DCHECK(!callback_) << "Calling InitializeUnifiedStream() twice?";
+ DCHECK(!callback_) << "Calling InitializeWithSessionId() twice?";
DCHECK(params.IsValid());
audio_parameters_ = params;
callback_ = callback;
@@ -70,7 +68,7 @@ void AudioOutputDevice::InitializeUnifiedStream(const AudioParameters& params,
void AudioOutputDevice::Initialize(const AudioParameters& params,
RenderCallback* callback) {
- InitializeUnifiedStream(params, callback, 0);
+ InitializeWithSessionId(params, callback, 0);
}
AudioOutputDevice::~AudioOutputDevice() {
@@ -81,7 +79,7 @@ AudioOutputDevice::~AudioOutputDevice() {
void AudioOutputDevice::Start() {
DCHECK(callback_) << "Initialize hasn't been called";
- message_loop()->PostTask(FROM_HERE,
+ task_runner()->PostTask(FROM_HERE,
base::Bind(&AudioOutputDevice::CreateStreamOnIOThread, this,
audio_parameters_));
}
@@ -93,17 +91,17 @@ void AudioOutputDevice::Stop() {
stopping_hack_ = true;
}
- message_loop()->PostTask(FROM_HERE,
+ task_runner()->PostTask(FROM_HERE,
base::Bind(&AudioOutputDevice::ShutDownOnIOThread, this));
}
void AudioOutputDevice::Play() {
- message_loop()->PostTask(FROM_HERE,
+ task_runner()->PostTask(FROM_HERE,
base::Bind(&AudioOutputDevice::PlayOnIOThread, this));
}
void AudioOutputDevice::Pause() {
- message_loop()->PostTask(FROM_HERE,
+ task_runner()->PostTask(FROM_HERE,
base::Bind(&AudioOutputDevice::PauseOnIOThread, this));
}
@@ -111,7 +109,7 @@ bool AudioOutputDevice::SetVolume(double volume) {
if (volume < 0 || volume > 1.0)
return false;
- if (!message_loop()->PostTask(FROM_HERE,
+ if (!task_runner()->PostTask(FROM_HERE,
base::Bind(&AudioOutputDevice::SetVolumeOnIOThread, this, volume))) {
return false;
}
@@ -120,7 +118,7 @@ bool AudioOutputDevice::SetVolume(double volume) {
}
void AudioOutputDevice::CreateStreamOnIOThread(const AudioParameters& params) {
- DCHECK(message_loop()->BelongsToCurrentThread());
+ DCHECK(task_runner()->BelongsToCurrentThread());
if (state_ == IDLE) {
state_ = CREATING_STREAM;
ipc_->CreateStream(this, params, session_id_);
@@ -128,7 +126,7 @@ void AudioOutputDevice::CreateStreamOnIOThread(const AudioParameters& params) {
}
void AudioOutputDevice::PlayOnIOThread() {
- DCHECK(message_loop()->BelongsToCurrentThread());
+ DCHECK(task_runner()->BelongsToCurrentThread());
if (state_ == PAUSED) {
ipc_->PlayStream();
state_ = PLAYING;
@@ -139,7 +137,7 @@ void AudioOutputDevice::PlayOnIOThread() {
}
void AudioOutputDevice::PauseOnIOThread() {
- DCHECK(message_loop()->BelongsToCurrentThread());
+ DCHECK(task_runner()->BelongsToCurrentThread());
if (state_ == PLAYING) {
ipc_->PauseStream();
state_ = PAUSED;
@@ -148,7 +146,7 @@ void AudioOutputDevice::PauseOnIOThread() {
}
void AudioOutputDevice::ShutDownOnIOThread() {
- DCHECK(message_loop()->BelongsToCurrentThread());
+ DCHECK(task_runner()->BelongsToCurrentThread());
// Close the stream, if we haven't already.
if (state_ >= CREATING_STREAM) {
@@ -172,13 +170,13 @@ void AudioOutputDevice::ShutDownOnIOThread() {
}
void AudioOutputDevice::SetVolumeOnIOThread(double volume) {
- DCHECK(message_loop()->BelongsToCurrentThread());
+ DCHECK(task_runner()->BelongsToCurrentThread());
if (state_ >= CREATING_STREAM)
ipc_->SetVolume(volume);
}
void AudioOutputDevice::OnStateChanged(AudioOutputIPCDelegate::State state) {
- DCHECK(message_loop()->BelongsToCurrentThread());
+ DCHECK(task_runner()->BelongsToCurrentThread());
// Do nothing if the stream has been closed.
if (state_ < CREATING_STREAM)
@@ -211,7 +209,7 @@ void AudioOutputDevice::OnStreamCreated(
base::SharedMemoryHandle handle,
base::SyncSocket::Handle socket_handle,
int length) {
- DCHECK(message_loop()->BelongsToCurrentThread());
+ DCHECK(task_runner()->BelongsToCurrentThread());
#if defined(OS_WIN)
DCHECK(handle);
DCHECK(socket_handle);
@@ -254,7 +252,7 @@ void AudioOutputDevice::OnStreamCreated(
}
void AudioOutputDevice::OnIPCClosed() {
- DCHECK(message_loop()->BelongsToCurrentThread());
+ DCHECK(task_runner()->BelongsToCurrentThread());
state_ = IPC_CLOSED;
ipc_.reset();
}
@@ -280,26 +278,10 @@ AudioOutputDevice::AudioThreadCallback::~AudioThreadCallback() {
void AudioOutputDevice::AudioThreadCallback::MapSharedMemory() {
CHECK_EQ(total_segments_, 1);
CHECK(shared_memory_.Map(memory_length_));
-
- // Calculate output and input memory size.
- int output_memory_size = AudioBus::CalculateMemorySize(audio_parameters_);
- int input_channels = audio_parameters_.input_channels();
- int frames = audio_parameters_.frames_per_buffer();
- int input_memory_size = AudioBus::CalculateMemorySize(input_channels, frames);
-
- int io_size = output_memory_size + input_memory_size;
-
- DCHECK_EQ(memory_length_, io_size);
+ DCHECK_EQ(memory_length_, AudioBus::CalculateMemorySize(audio_parameters_));
output_bus_ =
AudioBus::WrapMemory(audio_parameters_, shared_memory_.memory());
-
- if (input_channels > 0) {
- // The input data is after the output data.
- char* input_data =
- static_cast<char*>(shared_memory_.memory()) + output_memory_size;
- input_bus_ = AudioBus::WrapMemory(input_channels, frames, input_data);
- }
}
// Called whenever we receive notifications about pending data.
@@ -316,13 +298,7 @@ void AudioOutputDevice::AudioThreadCallback::Process(int pending_data) {
// Update the audio-delay measurement then ask client to render audio. Since
// |output_bus_| is wrapping the shared memory the Render() call is writing
// directly into the shared memory.
- int input_channels = audio_parameters_.input_channels();
- if (input_bus_ && input_channels > 0) {
- render_callback_->RenderIO(
- input_bus_.get(), output_bus_.get(), audio_delay_milliseconds);
- } else {
- render_callback_->Render(output_bus_.get(), audio_delay_milliseconds);
- }
+ render_callback_->Render(output_bus_.get(), audio_delay_milliseconds);
}
} // namespace media.
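The hunks above rename InitializeUnifiedStream() to InitializeWithSessionId() and drop the synchronized-input path, leaving Render() as the only callback. A hedged usage sketch assembled solely from the signatures visible in this file's diff (not standalone-compilable; |ipc|, |io_task_runner|, |params|, |callback| and |session_id| are assumed to exist on the caller's side):

    // Construct the device; after this patch IO work is posted to a
    // base::SingleThreadTaskRunner rather than a MessageLoopProxy.
    scoped_refptr<media::AudioOutputDevice> device =
        new media::AudioOutputDevice(ipc.Pass(), io_task_runner);

    // Initialize() remains the session-less convenience; per the diff it simply
    // forwards to InitializeWithSessionId(params, callback, 0).
    device->InitializeWithSessionId(params, callback, session_id);

    device->Start();  // posts CreateStreamOnIOThread to |io_task_runner|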
diff --git a/chromium/media/audio/audio_output_device.h b/chromium/media/audio/audio_output_device.h
index 66f78972f46..8449e9faa14 100644
--- a/chromium/media/audio/audio_output_device.h
+++ b/chromium/media/audio/audio_output_device.h
@@ -62,11 +62,10 @@
#include "base/bind.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/shared_memory.h"
-#include "base/message_loop/message_loop.h"
#include "media/audio/audio_device_thread.h"
#include "media/audio/audio_output_ipc.h"
#include "media/audio/audio_parameters.h"
-#include "media/audio/scoped_loop_observer.h"
+#include "media/audio/scoped_task_runner_observer.h"
#include "media/base/audio_renderer_sink.h"
#include "media/base/media_export.h"
@@ -75,21 +74,19 @@ namespace media {
class MEDIA_EXPORT AudioOutputDevice
: NON_EXPORTED_BASE(public AudioRendererSink),
NON_EXPORTED_BASE(public AudioOutputIPCDelegate),
- NON_EXPORTED_BASE(public ScopedLoopObserver) {
+ NON_EXPORTED_BASE(public ScopedTaskRunnerObserver) {
public:
// NOTE: Clients must call Initialize() before using.
- AudioOutputDevice(scoped_ptr<AudioOutputIPC> ipc,
- const scoped_refptr<base::MessageLoopProxy>& io_loop);
+ AudioOutputDevice(
+ scoped_ptr<AudioOutputIPC> ipc,
+ const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner);
// Initialize function for clients wishing to have unified input and
// output, |params| may specify |input_channels| > 0, representing a
// number of input channels which will be at the same sample-rate
// and buffer-size as the output as specified in |params|. |session_id| is
// used for the browser to select the correct input device.
- // In this case, the callback's RenderIO() method will be called instead
- // of Render(), providing the synchronized input data at the same time as
- // when new output data is to be rendered.
- void InitializeUnifiedStream(const AudioParameters& params,
+ void InitializeWithSessionId(const AudioParameters& params,
RenderCallback* callback,
int session_id);
diff --git a/chromium/media/audio/audio_output_device_unittest.cc b/chromium/media/audio/audio_output_device_unittest.cc
index 7aca2627745..92d94250b4a 100644
--- a/chromium/media/audio/audio_output_device_unittest.cc
+++ b/chromium/media/audio/audio_output_device_unittest.cc
@@ -37,9 +37,6 @@ class MockRenderCallback : public AudioRendererSink::RenderCallback {
virtual ~MockRenderCallback() {}
MOCK_METHOD2(Render, int(AudioBus* dest, int audio_delay_milliseconds));
- MOCK_METHOD3(RenderIO, void(AudioBus* source,
- AudioBus* dest,
- int audio_delay_milliseconds));
MOCK_METHOD0(OnRenderError, void());
};
@@ -114,8 +111,6 @@ class AudioOutputDeviceTest
private:
int CalculateMemorySize();
- const bool synchronized_io_;
- const int input_channels_;
SharedMemory shared_memory_;
CancelableSyncSocket browser_socket_;
CancelableSyncSocket renderer_socket_;
@@ -124,24 +119,14 @@ class AudioOutputDeviceTest
};
int AudioOutputDeviceTest::CalculateMemorySize() {
- // Calculate output and input memory size.
- int output_memory_size =
- AudioBus::CalculateMemorySize(default_audio_parameters_);
-
- int frames = default_audio_parameters_.frames_per_buffer();
- int input_memory_size =
- AudioBus::CalculateMemorySize(input_channels_, frames);
-
- return output_memory_size + input_memory_size;
+ // Calculate output memory size.
+ return AudioBus::CalculateMemorySize(default_audio_parameters_);
}
-AudioOutputDeviceTest::AudioOutputDeviceTest()
- : synchronized_io_(GetParam()),
- input_channels_(synchronized_io_ ? 2 : 0) {
+AudioOutputDeviceTest::AudioOutputDeviceTest() {
default_audio_parameters_.Reset(
AudioParameters::AUDIO_PCM_LINEAR,
- CHANNEL_LAYOUT_STEREO, 2, input_channels_,
- 48000, 16, 1024);
+ CHANNEL_LAYOUT_STEREO, 2, 0, 48000, 16, 1024);
audio_output_ipc_ = new MockAudioOutputIPC();
audio_device_ = new AudioOutputDevice(
@@ -207,18 +192,11 @@ void AudioOutputDeviceTest::ExpectRenderCallback() {
// writing the interleaved audio data into the shared memory section.
// So, for the sake of this test, we consider the call to Render a sign
// of success and quit the loop.
- if (synchronized_io_) {
- // For synchronized I/O, we expect RenderIO().
- EXPECT_CALL(callback_, RenderIO(_, _, _))
- .WillOnce(QuitLoop(io_loop_.message_loop_proxy()));
- } else {
- // For output only we expect Render().
- const int kNumberOfFramesToProcess = 0;
- EXPECT_CALL(callback_, Render(_, _))
- .WillOnce(DoAll(
- QuitLoop(io_loop_.message_loop_proxy()),
- Return(kNumberOfFramesToProcess)));
- }
+ const int kNumberOfFramesToProcess = 0;
+ EXPECT_CALL(callback_, Render(_, _))
+ .WillOnce(DoAll(
+ QuitLoop(io_loop_.message_loop_proxy()),
+ Return(kNumberOfFramesToProcess)));
}
void AudioOutputDeviceTest::WaitUntilRenderCallback() {
@@ -280,6 +258,5 @@ TEST_P(AudioOutputDeviceTest, CreateStream) {
}
INSTANTIATE_TEST_CASE_P(Render, AudioOutputDeviceTest, Values(false));
-INSTANTIATE_TEST_CASE_P(RenderIO, AudioOutputDeviceTest, Values(true));
} // namespace media.
diff --git a/chromium/media/audio/audio_output_dispatcher.cc b/chromium/media/audio/audio_output_dispatcher.cc
index 89912c07dce..7f3dd10e39a 100644
--- a/chromium/media/audio/audio_output_dispatcher.cc
+++ b/chromium/media/audio/audio_output_dispatcher.cc
@@ -4,27 +4,25 @@
#include "media/audio/audio_output_dispatcher.h"
-#include "base/message_loop/message_loop_proxy.h"
+#include "base/single_thread_task_runner.h"
namespace media {
AudioOutputDispatcher::AudioOutputDispatcher(
AudioManager* audio_manager,
const AudioParameters& params,
- const std::string& output_device_id,
- const std::string& input_device_id)
+ const std::string& device_id)
: audio_manager_(audio_manager),
- message_loop_(audio_manager->GetMessageLoop()),
+ task_runner_(audio_manager->GetTaskRunner()),
params_(params),
- output_device_id_(output_device_id),
- input_device_id_(input_device_id) {
+ device_id_(device_id) {
// We expect to be instantiated on the audio thread. Otherwise the
- // message_loop_ member will point to the wrong message loop!
- DCHECK(audio_manager->GetMessageLoop()->BelongsToCurrentThread());
+ // |task_runner_| member will point to the wrong message loop!
+ DCHECK(audio_manager->GetTaskRunner()->BelongsToCurrentThread());
}
AudioOutputDispatcher::~AudioOutputDispatcher() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
}
} // namespace media
diff --git a/chromium/media/audio/audio_output_dispatcher.h b/chromium/media/audio/audio_output_dispatcher.h
index d707aff14b6..079cba0ed74 100644
--- a/chromium/media/audio/audio_output_dispatcher.h
+++ b/chromium/media/audio/audio_output_dispatcher.h
@@ -26,7 +26,7 @@
#include "media/audio/audio_parameters.h"
namespace base {
-class MessageLoopProxy;
+class SingleThreadTaskRunner;
}
namespace media {
@@ -38,8 +38,7 @@ class MEDIA_EXPORT AudioOutputDispatcher
public:
AudioOutputDispatcher(AudioManager* audio_manager,
const AudioParameters& params,
- const std::string& output_device_id,
- const std::string& input_device_id);
+ const std::string& device_id);
// Called by AudioOutputProxy to open the stream.
// Returns false, if it fails to open it.
@@ -66,15 +65,7 @@ class MEDIA_EXPORT AudioOutputDispatcher
// Called on the audio thread when the AudioManager is shutting down.
virtual void Shutdown() = 0;
- // Called by the AudioManager to restart streams when a wedge is detected. A
- // wedge means the OS failed to request any audio after StartStream(). When a
- // wedge is detected all streams across all dispatchers must be closed. After
- // all streams are closed, streams are restarted. See http://crbug.com/160920
- virtual void CloseStreamsForWedgeFix() = 0;
- virtual void RestartStreamsForWedgeFix() = 0;
-
- // Accessor to the input device id used by unified IO.
- const std::string& input_device_id() const { return input_device_id_; }
+ const std::string& device_id() const { return device_id_; }
protected:
friend class base::RefCountedThreadSafe<AudioOutputDispatcher>;
@@ -83,10 +74,9 @@ class MEDIA_EXPORT AudioOutputDispatcher
// A no-reference-held pointer (we don't want circular references) back to the
// AudioManager that owns this object.
AudioManager* audio_manager_;
- const scoped_refptr<base::MessageLoopProxy> message_loop_;
+ const scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
const AudioParameters params_;
- std::string output_device_id_;
- const std::string input_device_id_;
+ std::string device_id_;
private:
DISALLOW_COPY_AND_ASSIGN(AudioOutputDispatcher);
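With the unified-IO input device removed, a dispatcher is now keyed by a single device id. A minimal construction sketch based on the constructor shape above and on how the updated proxy unit test builds its dispatcher (hedged; |audio_manager|, |params| and |close_delay| are assumed from the caller):

    // An empty device id selects the default output device, matching the
    // std::string() argument used by AudioOutputProxyTest below.
    scoped_refptr<media::AudioOutputDispatcherImpl> dispatcher =
        new media::AudioOutputDispatcherImpl(audio_manager, params,
                                             std::string(),  // default device
                                             close_delay);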
diff --git a/chromium/media/audio/audio_output_dispatcher_impl.cc b/chromium/media/audio/audio_output_dispatcher_impl.cc
index 5118bef71e9..0cb3db85cad 100644
--- a/chromium/media/audio/audio_output_dispatcher_impl.cc
+++ b/chromium/media/audio/audio_output_dispatcher_impl.cc
@@ -8,7 +8,7 @@
#include "base/bind.h"
#include "base/compiler_specific.h"
-#include "base/message_loop/message_loop.h"
+#include "base/single_thread_task_runner.h"
#include "base/time/time.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_output_proxy.h"
@@ -19,12 +19,10 @@ AudioOutputDispatcherImpl::AudioOutputDispatcherImpl(
AudioManager* audio_manager,
const AudioParameters& params,
const std::string& output_device_id,
- const std::string& input_device_id,
const base::TimeDelta& close_delay)
: AudioOutputDispatcher(audio_manager,
params,
- output_device_id,
- input_device_id),
+ output_device_id),
idle_proxies_(0),
close_timer_(FROM_HERE,
close_delay,
@@ -41,7 +39,7 @@ AudioOutputDispatcherImpl::~AudioOutputDispatcherImpl() {
}
bool AudioOutputDispatcherImpl::OpenStream() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
// Ensure that there is at least one open stream.
if (idle_streams_.empty() && !CreateAndOpenStream())
@@ -55,7 +53,7 @@ bool AudioOutputDispatcherImpl::OpenStream() {
bool AudioOutputDispatcherImpl::StartStream(
AudioOutputStream::AudioSourceCallback* callback,
AudioOutputProxy* stream_proxy) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(proxy_to_physical_map_.find(stream_proxy) ==
proxy_to_physical_map_.end());
@@ -82,7 +80,7 @@ bool AudioOutputDispatcherImpl::StartStream(
}
void AudioOutputDispatcherImpl::StopStream(AudioOutputProxy* stream_proxy) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
AudioStreamMap::iterator it = proxy_to_physical_map_.find(stream_proxy);
DCHECK(it != proxy_to_physical_map_.end());
@@ -99,7 +97,7 @@ void AudioOutputDispatcherImpl::StopStream(AudioOutputProxy* stream_proxy) {
void AudioOutputDispatcherImpl::StreamVolumeSet(AudioOutputProxy* stream_proxy,
double volume) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
AudioStreamMap::iterator it = proxy_to_physical_map_.find(stream_proxy);
if (it != proxy_to_physical_map_.end()) {
AudioOutputStream* physical_stream = it->second;
@@ -109,7 +107,7 @@ void AudioOutputDispatcherImpl::StreamVolumeSet(AudioOutputProxy* stream_proxy,
}
void AudioOutputDispatcherImpl::CloseStream(AudioOutputProxy* stream_proxy) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK_GT(idle_proxies_, 0u);
--idle_proxies_;
@@ -121,17 +119,21 @@ void AudioOutputDispatcherImpl::CloseStream(AudioOutputProxy* stream_proxy) {
}
void AudioOutputDispatcherImpl::Shutdown() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
// Close all idle streams immediately. The |close_timer_| will handle
// invalidating any outstanding tasks upon its destruction.
CloseAllIdleStreams();
+
+ // No AudioOutputProxy objects should hold a reference to us when we get
+ // to this stage.
+ DCHECK(HasOneRef()) << "Only the AudioManager should hold a reference";
}
bool AudioOutputDispatcherImpl::CreateAndOpenStream() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
AudioOutputStream* stream = audio_manager_->MakeAudioOutputStream(
- params_, output_device_id_, input_device_id_);
+ params_, device_id_);
if (!stream)
return false;
@@ -143,19 +145,19 @@ bool AudioOutputDispatcherImpl::CreateAndOpenStream() {
const int stream_id = audio_stream_id_++;
audio_stream_ids_[stream] = stream_id;
audio_log_->OnCreated(
- stream_id, params_, input_device_id_, output_device_id_);
+ stream_id, params_, device_id_);
idle_streams_.push_back(stream);
return true;
}
void AudioOutputDispatcherImpl::CloseAllIdleStreams() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
CloseIdleStreams(0);
}
void AudioOutputDispatcherImpl::CloseIdleStreams(size_t keep_alive) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
if (idle_streams_.size() <= keep_alive)
return;
for (size_t i = keep_alive; i < idle_streams_.size(); ++i) {
@@ -170,17 +172,4 @@ void AudioOutputDispatcherImpl::CloseIdleStreams(size_t keep_alive) {
idle_streams_.erase(idle_streams_.begin() + keep_alive, idle_streams_.end());
}
-void AudioOutputDispatcherImpl::CloseStreamsForWedgeFix() {
- DCHECK(message_loop_->BelongsToCurrentThread());
- CloseAllIdleStreams();
-}
-
-void AudioOutputDispatcherImpl::RestartStreamsForWedgeFix() {
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- // Should only be called when the dispatcher is used with fake streams which
- // don't need to be shutdown or restarted.
- CHECK_EQ(params_.format(), AudioParameters::AUDIO_FAKE);
-}
-
} // namespace media
diff --git a/chromium/media/audio/audio_output_dispatcher_impl.h b/chromium/media/audio/audio_output_dispatcher_impl.h
index 037e11466f1..52d647a3be6 100644
--- a/chromium/media/audio/audio_output_dispatcher_impl.h
+++ b/chromium/media/audio/audio_output_dispatcher_impl.h
@@ -36,7 +36,6 @@ class MEDIA_EXPORT AudioOutputDispatcherImpl : public AudioOutputDispatcher {
AudioOutputDispatcherImpl(AudioManager* audio_manager,
const AudioParameters& params,
const std::string& output_device_id,
- const std::string& input_device_id,
const base::TimeDelta& close_delay);
// Opens a new physical stream if there are no pending streams in
@@ -62,9 +61,6 @@ class MEDIA_EXPORT AudioOutputDispatcherImpl : public AudioOutputDispatcher {
virtual void Shutdown() OVERRIDE;
- virtual void CloseStreamsForWedgeFix() OVERRIDE;
- virtual void RestartStreamsForWedgeFix() OVERRIDE;
-
private:
friend class base::RefCountedThreadSafe<AudioOutputDispatcherImpl>;
virtual ~AudioOutputDispatcherImpl();
diff --git a/chromium/media/audio/audio_output_ipc.h b/chromium/media/audio/audio_output_ipc.h
index 3353735b085..f85d8e01953 100644
--- a/chromium/media/audio/audio_output_ipc.h
+++ b/chromium/media/audio/audio_output_ipc.h
@@ -23,7 +23,8 @@ class MEDIA_EXPORT AudioOutputIPCDelegate {
enum State {
kPlaying,
kPaused,
- kError
+ kError,
+ kStateLast = kError
};
// Called when state of an audio stream has changed.
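Adding kStateLast = kError gives the enum an explicit upper bound. A plausible consumer is bounds-checked IPC serialization; the following is an assumption about how traits elsewhere in the tree would use the new constant, not a line taken from this patch:

    // Hypothetical traits declaration (assumption): values arriving over IPC
    // are validated against the kStateLast bound before being deserialized.
    IPC_ENUM_TRAITS_MAX_VALUE(media::AudioOutputIPCDelegate::State,
                              media::AudioOutputIPCDelegate::kStateLast)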
diff --git a/chromium/media/audio/audio_output_proxy_unittest.cc b/chromium/media/audio/audio_output_proxy_unittest.cc
index cea098820aa..b8f23acaa92 100644
--- a/chromium/media/audio/audio_output_proxy_unittest.cc
+++ b/chromium/media/audio/audio_output_proxy_unittest.cc
@@ -5,7 +5,6 @@
#include <string>
#include "base/message_loop/message_loop.h"
-#include "base/message_loop/message_loop_proxy.h"
#include "base/run_loop.h"
#include "media/audio/audio_manager.h"
#include "media/audio/audio_manager_base.h"
@@ -94,27 +93,25 @@ class MockAudioManager : public AudioManagerBase {
MOCK_METHOD0(HasAudioOutputDevices, bool());
MOCK_METHOD0(HasAudioInputDevices, bool());
MOCK_METHOD0(GetAudioInputDeviceModel, base::string16());
- MOCK_METHOD3(MakeAudioOutputStream, AudioOutputStream*(
+ MOCK_METHOD2(MakeAudioOutputStream, AudioOutputStream*(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id));
- MOCK_METHOD3(MakeAudioOutputStreamProxy, AudioOutputStream*(
+ const std::string& device_id));
+ MOCK_METHOD2(MakeAudioOutputStreamProxy, AudioOutputStream*(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id));
+ const std::string& device_id));
MOCK_METHOD2(MakeAudioInputStream, AudioInputStream*(
const AudioParameters& params, const std::string& device_id));
MOCK_METHOD0(ShowAudioInputSettings, void());
- MOCK_METHOD0(GetMessageLoop, scoped_refptr<base::MessageLoopProxy>());
- MOCK_METHOD0(GetWorkerLoop, scoped_refptr<base::MessageLoopProxy>());
+ MOCK_METHOD0(GetTaskRunner, scoped_refptr<base::SingleThreadTaskRunner>());
+ MOCK_METHOD0(GetWorkerTaskRunner,
+ scoped_refptr<base::SingleThreadTaskRunner>());
MOCK_METHOD1(GetAudioInputDeviceNames, void(
media::AudioDeviceNames* device_name));
MOCK_METHOD1(MakeLinearOutputStream, AudioOutputStream*(
const AudioParameters& params));
- MOCK_METHOD3(MakeLowLatencyOutputStream, AudioOutputStream*(
- const AudioParameters& params, const std::string& device_id,
- const std::string& input_device_id));
+ MOCK_METHOD2(MakeLowLatencyOutputStream, AudioOutputStream*(
+ const AudioParameters& params, const std::string& device_id));
MOCK_METHOD2(MakeLinearInputStream, AudioInputStream*(
const AudioParameters& params, const std::string& device_id));
MOCK_METHOD2(MakeLowLatencyInputStream, AudioInputStream*(
@@ -132,10 +129,6 @@ class MockAudioSourceCallback : public AudioOutputStream::AudioSourceCallback {
audio_bus->Zero();
return audio_bus->frames();
}
- int OnMoreIOData(AudioBus* source, AudioBus* dest,
- AudioBuffersState buffers_state) {
- return OnMoreData(dest, buffers_state);
- }
MOCK_METHOD1(OnError, void(AudioOutputStream* stream));
};
@@ -146,9 +139,9 @@ namespace media {
class AudioOutputProxyTest : public testing::Test {
protected:
virtual void SetUp() {
- EXPECT_CALL(manager_, GetMessageLoop())
+ EXPECT_CALL(manager_, GetTaskRunner())
.WillRepeatedly(Return(message_loop_.message_loop_proxy()));
- EXPECT_CALL(manager_, GetWorkerLoop())
+ EXPECT_CALL(manager_, GetWorkerTaskRunner())
.WillRepeatedly(Return(message_loop_.message_loop_proxy()));
// Use a low sample rate and large buffer size when testing otherwise the
// FakeAudioOutputStream will keep the message loop busy indefinitely; i.e.,
@@ -168,7 +161,6 @@ class AudioOutputProxyTest : public testing::Test {
dispatcher_impl_ = new AudioOutputDispatcherImpl(&manager(),
params_,
std::string(),
- std::string(),
close_delay);
}
@@ -199,7 +191,7 @@ class AudioOutputProxyTest : public testing::Test {
void OpenAndClose(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(true));
@@ -213,7 +205,7 @@ class AudioOutputProxyTest : public testing::Test {
void StartAndStop(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(true));
@@ -236,7 +228,7 @@ class AudioOutputProxyTest : public testing::Test {
void CloseAfterStop(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(true));
@@ -261,7 +253,7 @@ class AudioOutputProxyTest : public testing::Test {
void TwoStreams(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(true));
@@ -280,7 +272,7 @@ class AudioOutputProxyTest : public testing::Test {
void OpenFailed(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(false));
@@ -297,7 +289,7 @@ class AudioOutputProxyTest : public testing::Test {
void CreateAndWait(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(true));
@@ -314,7 +306,7 @@ class AudioOutputProxyTest : public testing::Test {
void OneStream_TwoPlays(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
@@ -346,7 +338,7 @@ class AudioOutputProxyTest : public testing::Test {
MockAudioOutputStream stream1(&manager_, params_);
MockAudioOutputStream stream2(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
.WillOnce(Return(&stream1))
.WillOnce(Return(&stream2));
@@ -383,7 +375,7 @@ class AudioOutputProxyTest : public testing::Test {
void StartFailed(AudioOutputDispatcher* dispatcher) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
.WillOnce(Return(&stream));
EXPECT_CALL(stream, Open())
.WillOnce(Return(true));
@@ -394,7 +386,7 @@ class AudioOutputProxyTest : public testing::Test {
WaitForCloseTimer(&stream);
// |stream| is closed at this point. Start() should reopen it again.
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
.Times(2)
.WillRepeatedly(Return(reinterpret_cast<AudioOutputStream*>(NULL)));
@@ -434,8 +426,7 @@ class AudioOutputResamplerTest : public AudioOutputProxyTest {
AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
16000, 16, 1024);
resampler_ = new AudioOutputResampler(
- &manager(), params_, resampler_params_, std::string(), std::string(),
- close_delay);
+ &manager(), params_, resampler_params_, std::string(), close_delay);
}
virtual void OnStart() OVERRIDE {
@@ -535,7 +526,7 @@ TEST_F(AudioOutputResamplerTest, StartFailed) { StartFailed(resampler_); }
// ensure AudioOutputResampler falls back to the high latency path.
TEST_F(AudioOutputResamplerTest, LowLatencyCreateFailedFallback) {
MockAudioOutputStream stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
.Times(2)
.WillOnce(Return(static_cast<AudioOutputStream*>(NULL)))
.WillRepeatedly(Return(&stream));
@@ -552,7 +543,7 @@ TEST_F(AudioOutputResamplerTest, LowLatencyCreateFailedFallback) {
TEST_F(AudioOutputResamplerTest, LowLatencyOpenFailedFallback) {
MockAudioOutputStream failed_stream(&manager_, params_);
MockAudioOutputStream okay_stream(&manager_, params_);
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
.Times(2)
.WillOnce(Return(&failed_stream))
.WillRepeatedly(Return(&okay_stream));
@@ -580,7 +571,7 @@ TEST_F(AudioOutputResamplerTest, HighLatencyFallbackFailed) {
#else
static const int kFallbackCount = 1;
#endif
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
.Times(kFallbackCount)
.WillRepeatedly(Return(static_cast<AudioOutputStream*>(NULL)));
@@ -591,7 +582,7 @@ TEST_F(AudioOutputResamplerTest, HighLatencyFallbackFailed) {
testing::Property(&AudioParameters::sample_rate, params_.sample_rate()),
testing::Property(
&AudioParameters::frames_per_buffer, params_.frames_per_buffer())),
- _, _))
+ _))
.Times(1)
.WillOnce(Return(&okay_stream));
EXPECT_CALL(okay_stream, Open())
@@ -613,7 +604,7 @@ TEST_F(AudioOutputResamplerTest, AllFallbackFailed) {
#else
static const int kFallbackCount = 2;
#endif
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
.Times(kFallbackCount)
.WillRepeatedly(Return(static_cast<AudioOutputStream*>(NULL)));
@@ -629,7 +620,7 @@ TEST_F(AudioOutputResamplerTest, LowLatencyOpenEventuallyFails) {
MockAudioOutputStream stream2(&manager_, params_);
// Setup the mock such that all three streams are successfully created.
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
+ EXPECT_CALL(manager(), MakeAudioOutputStream(_, _))
.WillOnce(Return(&stream1))
.WillOnce(Return(&stream2))
.WillRepeatedly(Return(static_cast<AudioOutputStream*>(NULL)));
@@ -678,75 +669,4 @@ TEST_F(AudioOutputResamplerTest, LowLatencyOpenEventuallyFails) {
EXPECT_TRUE(stream2.start_called());
}
-// Ensures the methods used to fix audio output wedges are working correctly.
-TEST_F(AudioOutputResamplerTest, WedgeFix) {
- MockAudioOutputStream stream1(&manager_, params_);
- MockAudioOutputStream stream2(&manager_, params_);
- MockAudioOutputStream stream3(&manager_, params_);
-
- // Setup the mock such that all three streams are successfully created.
- EXPECT_CALL(manager(), MakeAudioOutputStream(_, _, _))
- .WillOnce(Return(&stream1))
- .WillOnce(Return(&stream2))
- .WillOnce(Return(&stream3));
-
- // Stream1 should be able to successfully open and start.
- EXPECT_CALL(stream1, Open())
- .WillOnce(Return(true));
- EXPECT_CALL(stream1, SetVolume(_));
- EXPECT_CALL(stream2, Open())
- .WillOnce(Return(true));
- EXPECT_CALL(stream2, SetVolume(_));
-
- // Open and start the first proxy and stream.
- AudioOutputProxy* proxy1 = new AudioOutputProxy(resampler_.get());
- EXPECT_TRUE(proxy1->Open());
- proxy1->Start(&callback_);
- OnStart();
-
- // Open, but do not start the second proxy.
- AudioOutputProxy* proxy2 = new AudioOutputProxy(resampler_.get());
- EXPECT_TRUE(proxy2->Open());
-
- // Open, start and then stop the third proxy.
- AudioOutputProxy* proxy3 = new AudioOutputProxy(resampler_.get());
- EXPECT_TRUE(proxy3->Open());
- proxy3->Start(&callback_);
- OnStart();
- proxy3->Stop();
-
- // Wait for stream to timeout and shutdown.
- WaitForCloseTimer(&stream2);
-
- EXPECT_CALL(stream1, Close());
- resampler_->CloseStreamsForWedgeFix();
-
- // Don't pump the MessageLoop between CloseStreamsForWedgeFix() and
- // RestartStreamsForWedgeFix() to simulate intended usage. The OnStart() call
- // will take care of necessary work.
-
- // Stream3 should take Stream1's place after RestartStreamsForWedgeFix(). No
- // additional streams should be opened for proxy2 and proxy3.
- EXPECT_CALL(stream3, Open())
- .WillOnce(Return(true));
- EXPECT_CALL(stream3, SetVolume(_));
-
- resampler_->RestartStreamsForWedgeFix();
- OnStart();
-
- // Perform the required Stop()/Close() shutdown dance for each proxy.
- proxy3->Close();
- proxy2->Close();
- proxy1->Stop();
- CloseAndWaitForCloseTimer(proxy1, &stream3);
-
- // Wait for all of the messages to fly and then verify stream behavior.
- EXPECT_TRUE(stream1.stop_called());
- EXPECT_TRUE(stream1.start_called());
- EXPECT_TRUE(stream2.stop_called());
- EXPECT_TRUE(stream2.start_called());
- EXPECT_TRUE(stream3.stop_called());
- EXPECT_TRUE(stream3.start_called());
-}
-
} // namespace media
diff --git a/chromium/media/audio/audio_output_resampler.cc b/chromium/media/audio/audio_output_resampler.cc
index c53f3e089ce..15633bb2017 100644
--- a/chromium/media/audio/audio_output_resampler.cc
+++ b/chromium/media/audio/audio_output_resampler.cc
@@ -7,8 +7,8 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/compiler_specific.h"
-#include "base/message_loop/message_loop.h"
#include "base/metrics/histogram.h"
+#include "base/single_thread_task_runner.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "media/audio/audio_io.h"
@@ -31,9 +31,6 @@ class OnMoreDataConverter
// AudioSourceCallback interface.
virtual int OnMoreData(AudioBus* dest,
AudioBuffersState buffers_state) OVERRIDE;
- virtual int OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state) OVERRIDE;
virtual void OnError(AudioOutputStream* stream) OVERRIDE;
// Sets |source_callback_|. If this is not a new object, then Stop() must be
@@ -72,20 +69,24 @@ class OnMoreDataConverter
// Record UMA statistics for hardware output configuration.
static void RecordStats(const AudioParameters& output_params) {
+ // Note the 'PRESUBMIT_IGNORE_UMA_MAX's below, these silence the PRESUBMIT.py
+ // check for uma enum max usage, since we're abusing UMA_HISTOGRAM_ENUMERATION
+ // to report a discrete value.
UMA_HISTOGRAM_ENUMERATION(
- "Media.HardwareAudioBitsPerChannel", output_params.bits_per_sample(),
- limits::kMaxBitsPerSample);
+ "Media.HardwareAudioBitsPerChannel",
+ output_params.bits_per_sample(),
+ limits::kMaxBitsPerSample); // PRESUBMIT_IGNORE_UMA_MAX
UMA_HISTOGRAM_ENUMERATION(
"Media.HardwareAudioChannelLayout", output_params.channel_layout(),
- CHANNEL_LAYOUT_MAX);
+ CHANNEL_LAYOUT_MAX + 1);
UMA_HISTOGRAM_ENUMERATION(
"Media.HardwareAudioChannelCount", output_params.channels(),
- limits::kMaxChannels);
+ limits::kMaxChannels); // PRESUBMIT_IGNORE_UMA_MAX
- AudioSampleRate asr = media::AsAudioSampleRate(output_params.sample_rate());
- if (asr != kUnexpectedAudioSampleRate) {
+ AudioSampleRate asr;
+ if (ToAudioSampleRate(output_params.sample_rate(), &asr)) {
UMA_HISTOGRAM_ENUMERATION(
- "Media.HardwareAudioSamplesPerSecond", asr, kUnexpectedAudioSampleRate);
+ "Media.HardwareAudioSamplesPerSecond", asr, kAudioSampleRateMax + 1);
} else {
UMA_HISTOGRAM_COUNTS(
"Media.HardwareAudioSamplesPerSecondUnexpected",
@@ -96,21 +97,25 @@ static void RecordStats(const AudioParameters& output_params) {
// Record UMA statistics for hardware output configuration after fallback.
static void RecordFallbackStats(const AudioParameters& output_params) {
UMA_HISTOGRAM_BOOLEAN("Media.FallbackToHighLatencyAudioPath", true);
+ // Note the 'PRESUBMIT_IGNORE_UMA_MAX's below, these silence the PRESUBMIT.py
+ // check for uma enum max usage, since we're abusing UMA_HISTOGRAM_ENUMERATION
+ // to report a discrete value.
UMA_HISTOGRAM_ENUMERATION(
"Media.FallbackHardwareAudioBitsPerChannel",
- output_params.bits_per_sample(), limits::kMaxBitsPerSample);
+ output_params.bits_per_sample(),
+ limits::kMaxBitsPerSample); // PRESUBMIT_IGNORE_UMA_MAX
UMA_HISTOGRAM_ENUMERATION(
"Media.FallbackHardwareAudioChannelLayout",
- output_params.channel_layout(), CHANNEL_LAYOUT_MAX);
+ output_params.channel_layout(), CHANNEL_LAYOUT_MAX + 1);
UMA_HISTOGRAM_ENUMERATION(
- "Media.FallbackHardwareAudioChannelCount",
- output_params.channels(), limits::kMaxChannels);
+ "Media.FallbackHardwareAudioChannelCount", output_params.channels(),
+ limits::kMaxChannels); // PRESUBMIT_IGNORE_UMA_MAX
- AudioSampleRate asr = media::AsAudioSampleRate(output_params.sample_rate());
- if (asr != kUnexpectedAudioSampleRate) {
+ AudioSampleRate asr;
+ if (ToAudioSampleRate(output_params.sample_rate(), &asr)) {
UMA_HISTOGRAM_ENUMERATION(
"Media.FallbackHardwareAudioSamplesPerSecond",
- asr, kUnexpectedAudioSampleRate);
+ asr, kAudioSampleRateMax + 1);
} else {
UMA_HISTOGRAM_COUNTS(
"Media.FallbackHardwareAudioSamplesPerSecondUnexpected",
@@ -135,7 +140,7 @@ void AudioOutputResampler::SetupFallbackParams() {
AudioParameters::AUDIO_PCM_LINEAR, params_.channel_layout(),
params_.sample_rate(), params_.bits_per_sample(),
frames_per_buffer);
- output_device_id_ = "";
+ device_id_ = "";
Initialize();
#endif
}
@@ -144,10 +149,8 @@ AudioOutputResampler::AudioOutputResampler(AudioManager* audio_manager,
const AudioParameters& input_params,
const AudioParameters& output_params,
const std::string& output_device_id,
- const std::string& input_device_id,
const base::TimeDelta& close_delay)
- : AudioOutputDispatcher(audio_manager, input_params, output_device_id,
- input_device_id),
+ : AudioOutputDispatcher(audio_manager, input_params, output_device_id),
close_delay_(close_delay),
output_params_(output_params),
streams_opened_(false) {
@@ -169,12 +172,11 @@ void AudioOutputResampler::Initialize() {
DCHECK(!streams_opened_);
DCHECK(callbacks_.empty());
dispatcher_ = new AudioOutputDispatcherImpl(
- audio_manager_, output_params_, output_device_id_, input_device_id_,
- close_delay_);
+ audio_manager_, output_params_, device_id_, close_delay_);
}
bool AudioOutputResampler::OpenStream() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
if (dispatcher_->OpenStream()) {
// Only record the UMA statistic if we didn't fallback during construction
@@ -233,7 +235,7 @@ bool AudioOutputResampler::OpenStream() {
bool AudioOutputResampler::StartStream(
AudioOutputStream::AudioSourceCallback* callback,
AudioOutputProxy* stream_proxy) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
OnMoreDataConverter* resampler_callback = NULL;
CallbackMap::iterator it = callbacks_.find(stream_proxy);
@@ -253,12 +255,12 @@ bool AudioOutputResampler::StartStream(
void AudioOutputResampler::StreamVolumeSet(AudioOutputProxy* stream_proxy,
double volume) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
dispatcher_->StreamVolumeSet(stream_proxy, volume);
}
void AudioOutputResampler::StopStream(AudioOutputProxy* stream_proxy) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
dispatcher_->StopStream(stream_proxy);
// Now that StopStream() has completed the underlying physical stream should
@@ -270,7 +272,7 @@ void AudioOutputResampler::StopStream(AudioOutputProxy* stream_proxy) {
}
void AudioOutputResampler::CloseStream(AudioOutputProxy* stream_proxy) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
dispatcher_->CloseStream(stream_proxy);
// We assume that StopStream() is always called prior to CloseStream(), so
@@ -283,7 +285,7 @@ void AudioOutputResampler::CloseStream(AudioOutputProxy* stream_proxy) {
}
void AudioOutputResampler::Shutdown() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
// No AudioOutputProxy objects should hold a reference to us when we get
// to this stage.
@@ -293,37 +295,6 @@ void AudioOutputResampler::Shutdown() {
DCHECK(callbacks_.empty());
}
-void AudioOutputResampler::CloseStreamsForWedgeFix() {
- DCHECK(message_loop_->BelongsToCurrentThread());
-
- // Stop and close all active streams. Once all streams across all dispatchers
- // have been closed the AudioManager will call RestartStreamsForWedgeFix().
- for (CallbackMap::iterator it = callbacks_.begin(); it != callbacks_.end();
- ++it) {
- if (it->second->started())
- dispatcher_->StopStream(it->first);
- dispatcher_->CloseStream(it->first);
- }
-
- // Close all idle streams as well.
- dispatcher_->CloseStreamsForWedgeFix();
-}
-
-void AudioOutputResampler::RestartStreamsForWedgeFix() {
- DCHECK(message_loop_->BelongsToCurrentThread());
- // By opening all streams first and then starting them one by one we ensure
- // the dispatcher only opens streams for those which will actually be used.
- for (CallbackMap::iterator it = callbacks_.begin(); it != callbacks_.end();
- ++it) {
- dispatcher_->OpenStream();
- }
- for (CallbackMap::iterator it = callbacks_.begin(); it != callbacks_.end();
- ++it) {
- if (it->second->started())
- dispatcher_->StartStream(it->second, it->first);
- }
-}
-
OnMoreDataConverter::OnMoreDataConverter(const AudioParameters& input_params,
const AudioParameters& output_params)
: io_ratio_(static_cast<double>(input_params.GetBytesPerSecond()) /
@@ -357,16 +328,6 @@ void OnMoreDataConverter::Stop() {
int OnMoreDataConverter::OnMoreData(AudioBus* dest,
AudioBuffersState buffers_state) {
- return OnMoreIOData(NULL, dest, buffers_state);
-}
-
-int OnMoreDataConverter::OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state) {
- // Note: The input portion of OnMoreIOData() is not supported when a converter
- // has been injected. Downstream clients prefer silence to potentially split
- // apart input data.
-
current_buffers_state_ = buffers_state;
audio_converter_.Convert(dest);
@@ -386,8 +347,7 @@ double OnMoreDataConverter::ProvideInput(AudioBus* dest,
buffer_delay.InSecondsF() * input_bytes_per_second_);
// Retrieve data from the original callback.
- const int frames = source_callback_->OnMoreIOData(
- NULL, dest, new_buffers_state);
+ const int frames = source_callback_->OnMoreData(dest, new_buffers_state);
// Zero any unfilled frames if anything was filled, otherwise we'll just
// return a volume of zero and let AudioConverter drop the output.
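The histogram hunks above stop passing CHANNEL_LAYOUT_MAX and kUnexpectedAudioSampleRate directly and instead pass an exclusive boundary one past the largest valid value. A small standalone sketch of that convention (the enum below is a local stand-in, not the real media::ChannelLayout):

    #include <cassert>

    // UMA_HISTOGRAM_ENUMERATION treats its boundary argument as exclusive, so
    // an enum whose largest valid value is kMaxLayout needs kMaxLayout + 1 as
    // the boundary for kMaxLayout itself to land in a real bucket.
    enum FakeLayout { kMono = 0, kStereo = 1, kMaxLayout = kStereo };

    int main() {
      const int boundary = kMaxLayout + 1;  // what the patch now passes
      assert(kMaxLayout < boundary);        // the max value is still recordable
      return 0;
    }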
diff --git a/chromium/media/audio/audio_output_resampler.h b/chromium/media/audio/audio_output_resampler.h
index a8fca232470..fa488aa1956 100644
--- a/chromium/media/audio/audio_output_resampler.h
+++ b/chromium/media/audio/audio_output_resampler.h
@@ -41,7 +41,6 @@ class MEDIA_EXPORT AudioOutputResampler : public AudioOutputDispatcher {
const AudioParameters& input_params,
const AudioParameters& output_params,
const std::string& output_device_id,
- const std::string& input_device_id,
const base::TimeDelta& close_delay);
// AudioOutputDispatcher interface.
@@ -53,8 +52,6 @@ class MEDIA_EXPORT AudioOutputResampler : public AudioOutputDispatcher {
double volume) OVERRIDE;
virtual void CloseStream(AudioOutputProxy* stream_proxy) OVERRIDE;
virtual void Shutdown() OVERRIDE;
- virtual void CloseStreamsForWedgeFix() OVERRIDE;
- virtual void RestartStreamsForWedgeFix() OVERRIDE;
private:
friend class base::RefCountedThreadSafe<AudioOutputResampler>;
diff --git a/chromium/media/audio/audio_parameters.cc b/chromium/media/audio/audio_parameters.cc
index fff815610fe..62b825ff0e5 100644
--- a/chromium/media/audio/audio_parameters.cc
+++ b/chromium/media/audio/audio_parameters.cc
@@ -85,7 +85,7 @@ bool AudioParameters::IsValid() const {
(channels_ > 0) &&
(channels_ <= media::limits::kMaxChannels) &&
(channel_layout_ > CHANNEL_LAYOUT_UNSUPPORTED) &&
- (channel_layout_ < CHANNEL_LAYOUT_MAX) &&
+ (channel_layout_ <= CHANNEL_LAYOUT_MAX) &&
(input_channels_ >= 0) &&
(input_channels_ <= media::limits::kMaxChannels) &&
(sample_rate_ >= media::limits::kMinSampleRate) &&
diff --git a/chromium/media/audio/audio_parameters.h b/chromium/media/audio/audio_parameters.h
index 62ff4fd48f1..b23d26fdcae 100644
--- a/chromium/media/audio/audio_parameters.h
+++ b/chromium/media/audio/audio_parameters.h
@@ -48,7 +48,9 @@ class MEDIA_EXPORT AudioParameters {
// effects should be enabled.
enum PlatformEffectsMask {
NO_EFFECTS = 0x0,
- ECHO_CANCELLER = 0x1
+ ECHO_CANCELLER = 0x1,
+ DUCKING = 0x2, // Enables ducking if the OS supports it.
+ KEYBOARD_MIC = 0x4,
};
AudioParameters();
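DUCKING and KEYBOARD_MIC extend PlatformEffectsMask with power-of-two flags, so callers can OR them together and test them individually. A small standalone illustration; the enum values are copied from the hunk above into a local stand-in:

    #include <cstdio>

    // Local mirror of the PlatformEffectsMask values added above.
    enum PlatformEffectsMask {
      NO_EFFECTS = 0x0,
      ECHO_CANCELLER = 0x1,
      DUCKING = 0x2,
      KEYBOARD_MIC = 0x4,
    };

    int main() {
      int effects = ECHO_CANCELLER | DUCKING;         // request two effects
      bool ducking = (effects & DUCKING) != 0;        // set
      bool keyboard = (effects & KEYBOARD_MIC) != 0;  // not requested
      std::printf("ducking=%d keyboard_mic=%d\n", ducking, keyboard);
      return 0;
    }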
diff --git a/chromium/media/audio/audio_parameters_unittest.cc b/chromium/media/audio/audio_parameters_unittest.cc
index f0d37129eb9..390b205a091 100644
--- a/chromium/media/audio/audio_parameters_unittest.cc
+++ b/chromium/media/audio/audio_parameters_unittest.cc
@@ -63,6 +63,10 @@ TEST(AudioParameters, GetBytesPerBuffer) {
EXPECT_EQ(800, AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
CHANNEL_LAYOUT_STEREO, 1000, 16, 200)
.GetBytesPerBuffer());
+ EXPECT_EQ(300, AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
+ CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC,
+ 1000, 8, 100)
+ .GetBytesPerBuffer());
}
TEST(AudioParameters, GetBytesPerSecond) {
@@ -119,6 +123,23 @@ TEST(AudioParameters, Compare) {
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
2000, 16, 200),
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
+ CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, 1000, 8, 100),
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
+ CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, 1000, 8, 200),
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
+ CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, 1000, 16, 100),
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
+ CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, 1000, 16, 200),
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
+ CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, 2000, 8, 100),
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
+ CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, 2000, 8, 200),
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
+ CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, 2000, 16, 100),
+ AudioParameters(AudioParameters::AUDIO_PCM_LINEAR,
+ CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, 2000, 16, 200),
+
AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_MONO,
1000, 8, 100),
AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_MONO,
@@ -152,6 +173,23 @@ TEST(AudioParameters, Compare) {
CHANNEL_LAYOUT_STEREO, 2000, 16, 100),
AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
CHANNEL_LAYOUT_STEREO, 2000, 16, 200),
+
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, 1000, 8, 100),
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, 1000, 8, 200),
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, 1000, 16, 100),
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, 1000, 16, 200),
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, 2000, 8, 100),
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, 2000, 8, 200),
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, 2000, 16, 100),
+ AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC, 2000, 16, 200),
};
for (size_t i = 0; i < arraysize(values); ++i) {
diff --git a/chromium/media/audio/clockless_audio_sink.cc b/chromium/media/audio/clockless_audio_sink.cc
index ff809d0541d..89f43bf42fd 100644
--- a/chromium/media/audio/clockless_audio_sink.cc
+++ b/chromium/media/audio/clockless_audio_sink.cc
@@ -73,16 +73,12 @@ void ClocklessAudioSink::Initialize(const AudioParameters& params,
}
void ClocklessAudioSink::Start() {
+ DCHECK(initialized_);
DCHECK(!playing_);
}
void ClocklessAudioSink::Stop() {
- DCHECK(initialized_);
-
- if (!playing_)
- return;
-
- playback_time_ = thread_->Stop();
+ Pause();
}
void ClocklessAudioSink::Play() {
@@ -96,7 +92,13 @@ void ClocklessAudioSink::Play() {
}
void ClocklessAudioSink::Pause() {
- Stop();
+ DCHECK(initialized_);
+
+ if (!playing_)
+ return;
+
+ playing_ = false;
+ playback_time_ = thread_->Stop();
}
bool ClocklessAudioSink::SetVolume(double volume) {
diff --git a/chromium/media/audio/clockless_audio_sink.h b/chromium/media/audio/clockless_audio_sink.h
index 9e73b1a8817..bf68896c7aa 100644
--- a/chromium/media/audio/clockless_audio_sink.h
+++ b/chromium/media/audio/clockless_audio_sink.h
@@ -10,7 +10,7 @@
#include "media/base/audio_renderer_sink.h"
namespace base {
-class MessageLoopProxy;
+class SingleThreadTaskRunner;
}
namespace media {
diff --git a/chromium/media/audio/cras/audio_manager_cras.cc b/chromium/media/audio/cras/audio_manager_cras.cc
index 876d6ce8136..abc78dd4b78 100644
--- a/chromium/media/audio/cras/audio_manager_cras.cc
+++ b/chromium/media/audio/cras/audio_manager_cras.cc
@@ -4,6 +4,8 @@
#include "media/audio/cras/audio_manager_cras.h"
+#include <algorithm>
+
#include "base/command_line.h"
#include "base/environment.h"
#include "base/logging.h"
@@ -13,6 +15,11 @@
#include "media/audio/cras/cras_unified.h"
#include "media/base/channel_layout.h"
+// cras_util.h headers pull in min/max macros...
+// TODO(dgreid): Fix headers such that these aren't imported.
+#undef min
+#undef max
+
namespace media {
static void AddDefaultDevice(AudioDeviceNames* device_names) {
@@ -30,6 +37,13 @@ static const int kMaxOutputStreams = 50;
// Default sample rate for input and output streams.
static const int kDefaultSampleRate = 48000;
+// Define bounds for the output buffer size.
+static const int kMinimumOutputBufferSize = 512;
+static const int kMaximumOutputBufferSize = 8192;
+
+// Default input buffer size.
+static const int kDefaultInputBufferSize = 1024;
+
bool AudioManagerCras::HasAudioOutputDevices() {
return true;
}
@@ -63,12 +77,15 @@ void AudioManagerCras::GetAudioOutputDeviceNames(
AudioParameters AudioManagerCras::GetInputStreamParameters(
const std::string& device_id) {
- static const int kDefaultInputBufferSize = 1024;
+ int user_buffer_size = GetUserBufferSize();
+ int buffer_size = user_buffer_size ?
+ user_buffer_size : kDefaultInputBufferSize;
+
// TODO(hshi): Fine-tune audio parameters based on |device_id|. The optimal
// parameters for the loopback stream may differ from the default.
return AudioParameters(
AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
- kDefaultSampleRate, 16, kDefaultInputBufferSize);
+ kDefaultSampleRate, 16, buffer_size);
}
AudioOutputStream* AudioManagerCras::MakeLinearOutputStream(
@@ -79,8 +96,7 @@ AudioOutputStream* AudioManagerCras::MakeLinearOutputStream(
AudioOutputStream* AudioManagerCras::MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) {
+ const std::string& device_id) {
DLOG_IF(ERROR, !device_id.empty()) << "Not implemented!";
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
// TODO(dgreid): Open the correct input device for unified IO.
@@ -104,11 +120,9 @@ AudioParameters AudioManagerCras::GetPreferredOutputStreamParameters(
const AudioParameters& input_params) {
// TODO(tommi): Support |output_device_id|.
DLOG_IF(ERROR, !output_device_id.empty()) << "Not implemented!";
- static const int kDefaultOutputBufferSize = 512;
-
ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
int sample_rate = kDefaultSampleRate;
- int buffer_size = kDefaultOutputBufferSize;
+ int buffer_size = kMinimumOutputBufferSize;
int bits_per_sample = 16;
int input_channels = 0;
if (input_params.IsValid()) {
@@ -116,7 +130,9 @@ AudioParameters AudioManagerCras::GetPreferredOutputStreamParameters(
bits_per_sample = input_params.bits_per_sample();
channel_layout = input_params.channel_layout();
input_channels = input_params.input_channels();
- buffer_size = input_params.frames_per_buffer();
+ buffer_size =
+ std::min(kMaximumOutputBufferSize,
+ std::max(buffer_size, input_params.frames_per_buffer()));
}
int user_buffer_size = GetUserBufferSize();
@@ -138,4 +154,19 @@ AudioInputStream* AudioManagerCras::MakeInputStream(
return new CrasInputStream(params, this, device_id);
}
+snd_pcm_format_t AudioManagerCras::BitsToFormat(int bits_per_sample) {
+ switch (bits_per_sample) {
+ case 8:
+ return SND_PCM_FORMAT_U8;
+ case 16:
+ return SND_PCM_FORMAT_S16;
+ case 24:
+ return SND_PCM_FORMAT_S24;
+ case 32:
+ return SND_PCM_FORMAT_S32;
+ default:
+ return SND_PCM_FORMAT_UNKNOWN;
+ }
+}
+
} // namespace media
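The new output-buffer bounds mean a caller-supplied frames_per_buffer is clamped into [kMinimumOutputBufferSize, kMaximumOutputBufferSize] before being used as the preferred output buffer size. A standalone sketch of that clamp, using the constants and the std::min/std::max expression from the hunk above:

    #include <algorithm>
    #include <cassert>

    // Constants copied from the diff above.
    static const int kMinimumOutputBufferSize = 512;
    static const int kMaximumOutputBufferSize = 8192;

    static int ClampOutputBufferSize(int requested_frames) {
      // Mirrors the expression added to GetPreferredOutputStreamParameters().
      return std::min(kMaximumOutputBufferSize,
                      std::max(kMinimumOutputBufferSize, requested_frames));
    }

    int main() {
      assert(ClampOutputBufferSize(256) == 512);     // too small -> raised
      assert(ClampOutputBufferSize(2048) == 2048);   // in range -> kept
      assert(ClampOutputBufferSize(16384) == 8192);  // too large -> capped
      return 0;
    }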
diff --git a/chromium/media/audio/cras/audio_manager_cras.h b/chromium/media/audio/cras/audio_manager_cras.h
index 589374ae0b9..a9abd6c2deb 100644
--- a/chromium/media/audio/cras/audio_manager_cras.h
+++ b/chromium/media/audio/cras/audio_manager_cras.h
@@ -5,6 +5,8 @@
#ifndef MEDIA_AUDIO_CRAS_AUDIO_MANAGER_CRAS_H_
#define MEDIA_AUDIO_CRAS_AUDIO_MANAGER_CRAS_H_
+#include <cras_types.h>
+
#include <string>
#include "base/compiler_specific.h"
@@ -33,13 +35,14 @@ class MEDIA_EXPORT AudioManagerCras : public AudioManagerBase {
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) OVERRIDE;
+ const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLowLatencyInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
+ static snd_pcm_format_t BitsToFormat(int bits_per_sample);
+
protected:
virtual ~AudioManagerCras();
diff --git a/chromium/media/audio/cras/cras_input.cc b/chromium/media/audio/cras/cras_input.cc
index c41f3645efd..afdabb21d1f 100644
--- a/chromium/media/audio/cras/cras_input.cc
+++ b/chromium/media/audio/cras/cras_input.cc
@@ -7,10 +7,8 @@
#include <math.h>
#include "base/basictypes.h"
-#include "base/bind.h"
#include "base/logging.h"
#include "base/time/time.h"
-#include "media/audio/alsa/alsa_util.h"
#include "media/audio/audio_manager.h"
#include "media/audio/cras/audio_manager_cras.h"
@@ -29,6 +27,7 @@ CrasInputStream::CrasInputStream(const AudioParameters& params,
stream_direction_(device_id == AudioManagerBase::kLoopbackInputDeviceId ?
CRAS_STREAM_POST_MIX_PRE_DSP : CRAS_STREAM_INPUT) {
DCHECK(audio_manager_);
+ audio_bus_ = AudioBus::Create(params_);
}
CrasInputStream::~CrasInputStream() {
@@ -54,7 +53,7 @@ bool CrasInputStream::Open() {
}
snd_pcm_format_t pcm_format =
- alsa_util::BitsToFormat(params_.bits_per_sample());
+ AudioManagerCras::BitsToFormat(params_.bits_per_sample());
if (pcm_format == SND_PCM_FORMAT_UNKNOWN) {
DLOG(WARNING) << "Unsupported bits/sample: " << params_.bits_per_sample();
return false;
@@ -86,17 +85,14 @@ bool CrasInputStream::Open() {
}
void CrasInputStream::Close() {
+ Stop();
+
if (client_) {
cras_client_stop(client_);
cras_client_destroy(client_);
client_ = NULL;
}
- if (callback_) {
- callback_->OnClose(this);
- callback_ = NULL;
- }
-
// Signal to the manager that we're closed and can be removed.
// Should be last call in the method as it deletes "this".
audio_manager_->ReleaseInputStream(this);
@@ -117,7 +113,7 @@ void CrasInputStream::Start(AudioInputCallback* callback) {
// Prepare |audio_format| and |stream_params| for the stream we
// will create.
cras_audio_format* audio_format = cras_audio_format_create(
- alsa_util::BitsToFormat(params_.bits_per_sample()),
+ AudioManagerCras::BitsToFormat(params_.bits_per_sample()),
params_.sample_rate(),
params_.channels());
if (!audio_format) {
@@ -177,6 +173,7 @@ void CrasInputStream::Stop() {
cras_client_rm_stream(client_, stream_id_);
started_ = false;
+ callback_ = NULL;
}
// Static callback asking for samples. Run on high priority thread.
@@ -226,11 +223,9 @@ void CrasInputStream::ReadAudio(size_t frames,
double normalized_volume = 0.0;
GetAgcVolume(&normalized_volume);
- callback_->OnData(this,
- buffer,
- frames * bytes_per_frame_,
- bytes_latency,
- normalized_volume);
+ audio_bus_->FromInterleaved(
+ buffer, audio_bus_->frames(), params_.bits_per_sample() / 8);
+ callback_->OnData(this, audio_bus_.get(), bytes_latency, normalized_volume);
}
void CrasInputStream::NotifyStreamError(int err) {
diff --git a/chromium/media/audio/cras/cras_input.h b/chromium/media/audio/cras/cras_input.h
index dd2cb5474a4..1919224d9a9 100644
--- a/chromium/media/audio/cras/cras_input.h
+++ b/chromium/media/audio/cras/cras_input.h
@@ -10,8 +10,6 @@
#include <string>
#include "base/compiler_specific.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/memory/weak_ptr.h"
#include "media/audio/agc_audio_stream.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_parameters.h"
@@ -53,7 +51,7 @@ class CrasInputStream : public AgcAudioStream<AudioInputStream> {
const timespec* sample_ts,
void* arg);
- // Handles notificaiton that there was an error with the playback stream.
+ // Handles notification that there was an error with the playback stream.
static int StreamError(cras_client* client,
cras_stream_id_t stream_id,
int err,
@@ -100,9 +98,11 @@ class CrasInputStream : public AgcAudioStream<AudioInputStream> {
// Direction of the stream.
const CRAS_STREAM_DIRECTION stream_direction_;
+ scoped_ptr<AudioBus> audio_bus_;
+
DISALLOW_COPY_AND_ASSIGN(CrasInputStream);
};
} // namespace media
-#endif // MEDIA_AUDIO_CRAS_ALSA_INPUT_H_
+#endif // MEDIA_AUDIO_CRAS_CRAS_INPUT_H_
diff --git a/chromium/media/audio/cras/cras_input_unittest.cc b/chromium/media/audio/cras/cras_input_unittest.cc
index 27ea9858ba1..7081a98e907 100644
--- a/chromium/media/audio/cras/cras_input_unittest.cc
+++ b/chromium/media/audio/cras/cras_input_unittest.cc
@@ -2,18 +2,22 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include <unistd.h>
-
#include <string>
#include "base/synchronization/waitable_event.h"
#include "base/test/test_timeouts.h"
#include "base/time/time.h"
#include "media/audio/cras/audio_manager_cras.h"
-#include "media/audio/cras/cras_input.h"
+#include "media/audio/fake_audio_log_factory.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
+// cras_util.h defines custom min/max macros which break compilation, so ensure
+// it's not included until last. #if avoids presubmit errors.
+#if defined(USE_CRAS)
+#include "media/audio/cras/cras_input.h"
+#endif
+
using testing::_;
using testing::AtLeast;
using testing::Ge;
@@ -24,14 +28,15 @@ namespace media {
class MockAudioInputCallback : public AudioInputStream::AudioInputCallback {
public:
- MOCK_METHOD5(OnData, void(
- AudioInputStream*, const uint8*, uint32, uint32, double));
+ MOCK_METHOD4(OnData,
+ void(AudioInputStream*, const AudioBus*, uint32, double));
MOCK_METHOD1(OnError, void(AudioInputStream*));
- MOCK_METHOD1(OnClose, void(AudioInputStream*));
};
class MockAudioManagerCrasInput : public AudioManagerCras {
public:
+ MockAudioManagerCrasInput() : AudioManagerCras(&fake_audio_log_factory_) {}
+
// We need to override this function in order to skip checking the number
// of active output streams. It is because the number of active streams
// is managed inside MakeAudioInputStream, and we don't use
@@ -40,6 +45,9 @@ class MockAudioManagerCrasInput : public AudioManagerCras {
DCHECK(stream);
delete stream;
}
+
+ private:
+ FakeAudioLogFactory fake_audio_log_factory_;
};
class CrasInputStreamTest : public testing::Test {
@@ -77,14 +85,9 @@ class CrasInputStreamTest : public testing::Test {
// samples can be provided when doing non-integer SRC. For example
// converting from 192k to 44.1k is a ratio of 4.35 to 1.
MockAudioInputCallback mock_callback;
- unsigned int expected_size = (kTestFramesPerPacket - 8) *
- params.channels() *
- params.bits_per_sample() / 8;
-
base::WaitableEvent event(false, false);
- EXPECT_CALL(mock_callback,
- OnData(test_stream, _, Ge(expected_size), _, _))
+ EXPECT_CALL(mock_callback, OnData(test_stream, _, _, _))
.WillOnce(InvokeWithoutArgs(&event, &base::WaitableEvent::Signal));
test_stream->Start(&mock_callback);
@@ -93,8 +96,6 @@ class CrasInputStreamTest : public testing::Test {
EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout()));
test_stream->Stop();
-
- EXPECT_CALL(mock_callback, OnClose(test_stream)).Times(1);
test_stream->Close();
}
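The rewritten test expects the new four-argument OnData() form and simply waits for the first callback to arrive instead of checking byte counts or expecting OnClose(). That wait pattern can be sketched outside Chromium with plain gtest/gmock and a promise; the Sink/MockSink types below are made up for the example and are not the Chromium interfaces.

// callback_wait_sketch.cc -- "expect one data callback, then wait with a
// timeout" pattern, analogous to the CRAS input test above.
// Requires gtest/gmock; link against gmock_main.
#include <chrono>
#include <future>
#include <thread>
#include "gmock/gmock.h"
#include "gtest/gtest.h"

using testing::_;
using testing::Ge;
using testing::InvokeWithoutArgs;

class Sink {
 public:
  virtual ~Sink() {}
  virtual void OnData(const float* samples, int frames) = 0;
};

class MockSink : public Sink {
 public:
  MOCK_METHOD2(OnData, void(const float* samples, int frames));
};

TEST(CallbackWaitSketch, FirstCallbackArrives) {
  MockSink sink;
  std::promise<void> got_data;
  EXPECT_CALL(sink, OnData(_, Ge(1)))
      .WillOnce(InvokeWithoutArgs([&got_data] { got_data.set_value(); }));

  // Stand-in for a real capture stream: deliver one buffer from another
  // thread, as the CRAS callback thread would.
  std::thread producer([&sink] {
    float buffer[128] = { 0.0f };
    sink.OnData(buffer, 128);
  });

  EXPECT_EQ(std::future_status::ready,
            got_data.get_future().wait_for(std::chrono::milliseconds(500)));
  producer.join();
}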
diff --git a/chromium/media/audio/cras/cras_unified.cc b/chromium/media/audio/cras/cras_unified.cc
index c85cf59dd5f..25af3837ad6 100644
--- a/chromium/media/audio/cras/cras_unified.cc
+++ b/chromium/media/audio/cras/cras_unified.cc
@@ -4,11 +4,7 @@
#include "media/audio/cras/cras_unified.h"
-#include <cras_client.h>
-
-#include "base/command_line.h"
#include "base/logging.h"
-#include "media/audio/alsa/alsa_util.h"
#include "media/audio/cras/audio_manager_cras.h"
namespace media {
@@ -116,7 +112,7 @@ bool CrasUnifiedStream::Open() {
return false;
}
- if (alsa_util::BitsToFormat(params_.bits_per_sample()) ==
+ if (AudioManagerCras::BitsToFormat(params_.bits_per_sample()) ==
SND_PCM_FORMAT_UNKNOWN) {
LOG(WARNING) << "Unsupported pcm format";
return false;
@@ -187,7 +183,7 @@ void CrasUnifiedStream::Start(AudioSourceCallback* callback) {
// Prepare |audio_format| and |stream_params| for the stream we
// will create.
cras_audio_format* audio_format = cras_audio_format_create(
- alsa_util::BitsToFormat(params_.bits_per_sample()),
+ AudioManagerCras::BitsToFormat(params_.bits_per_sample()),
params_.sample_rate(),
params_.channels());
if (!audio_format) {
@@ -361,8 +357,7 @@ uint32 CrasUnifiedStream::ReadWriteAudio(size_t frames,
cras_client_calc_playback_latency(output_ts, &latency_ts);
total_delay_bytes += GetBytesLatency(latency_ts);
- int frames_filled = source_callback_->OnMoreIOData(
- input_bus_.get(),
+ int frames_filled = source_callback_->OnMoreData(
output_bus_.get(),
AudioBuffersState(0, total_delay_bytes));
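With synchronized input gone, the unified stream now drives a plain pull-model callback: it hands the source a destination buffer plus a delay estimate and lets the source fill as many frames as it can. A minimal standalone sketch of that shape, with illustrative names rather than the Chromium interface:

// pull_source_sketch.cc -- pull-model output callback: the stream asks the
// source to fill a destination buffer and reports how much audio is already
// queued downstream.
#include <algorithm>
#include <cstdio>
#include <vector>

class SourceCallback {
 public:
  virtual ~SourceCallback() {}
  // Fills |dest| and returns the number of frames written. |delay_bytes|
  // tells the source how much audio is still buffered before the speaker.
  virtual int OnMoreData(std::vector<float>* dest, int delay_bytes) = 0;
};

class SilenceSource : public SourceCallback {
 public:
  int OnMoreData(std::vector<float>* dest, int delay_bytes) override {
    std::fill(dest->begin(), dest->end(), 0.0f);
    return static_cast<int>(dest->size());
  }
};

int main() {
  SilenceSource source;
  std::vector<float> buffer(256);
  // A real stream would call this from its render callback with the measured
  // hardware latency; 1024 bytes is just a placeholder here.
  int frames = source.OnMoreData(&buffer, 1024);
  std::printf("rendered %d frames\n", frames);
  return 0;
}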
diff --git a/chromium/media/audio/cras/cras_unified.h b/chromium/media/audio/cras/cras_unified.h
index 818763efb49..db1d9feb648 100644
--- a/chromium/media/audio/cras/cras_unified.h
+++ b/chromium/media/audio/cras/cras_unified.h
@@ -10,21 +10,21 @@
#ifndef MEDIA_AUDIO_LINUX_CRAS_UNIFIED_H_
#define MEDIA_AUDIO_LINUX_CRAS_UNIFIED_H_
-#include <alsa/asoundlib.h>
#include <cras_client.h>
#include "base/compiler_specific.h"
-#include "base/gtest_prod_util.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_parameters.h"
namespace media {
class AudioManagerCras;
-class AudioParameters;
// Implementation of AudioOutputStream for Chrome OS using the Chrome OS audio
// server.
+// TODO(dgreid): This class is used for only output, either remove all the
+// relevant input code and change the class to CrasOutputStream or merge
+// cras_input.cc into this unified implementation.
class MEDIA_EXPORT CrasUnifiedStream : public AudioOutputStream {
public:
// The ctor takes all the usual parameters, plus |manager| which is the
@@ -57,7 +57,7 @@ class MEDIA_EXPORT CrasUnifiedStream : public AudioOutputStream {
const timespec* output_ts,
void* arg);
- // Handles notificaiton that there was an error with the playback stream.
+ // Handles notification that there was an error with the playback stream.
static int StreamError(cras_client* client,
cras_stream_id_t stream_id,
int err,
@@ -107,7 +107,7 @@ class MEDIA_EXPORT CrasUnifiedStream : public AudioOutputStream {
// Callback to get audio samples.
AudioSourceCallback* source_callback_;
- // Container for exchanging data with AudioSourceCallback::OnMoreIOData().
+ // Container for exchanging data with AudioSourceCallback::OnMoreData().
scoped_ptr<AudioBus> input_bus_;
scoped_ptr<AudioBus> output_bus_;
diff --git a/chromium/media/audio/cras/cras_unified_unittest.cc b/chromium/media/audio/cras/cras_unified_unittest.cc
index 7083eca427e..9d282bb7505 100644
--- a/chromium/media/audio/cras/cras_unified_unittest.cc
+++ b/chromium/media/audio/cras/cras_unified_unittest.cc
@@ -8,10 +8,17 @@
#include "base/test/test_timeouts.h"
#include "base/time/time.h"
#include "media/audio/cras/audio_manager_cras.h"
-#include "media/audio/cras/cras_unified.h"
+#include "media/audio/fake_audio_log_factory.h"
+#include "media/audio/mock_audio_source_callback.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
+// cras_util.h defines custom min/max macros which break compilation, so ensure
+// it's not included until last. #if avoids presubmit errors.
+#if defined(USE_CRAS)
+#include "media/audio/cras/cras_unified.h"
+#endif
+
using testing::_;
using testing::DoAll;
using testing::InvokeWithoutArgs;
@@ -21,25 +28,18 @@ using testing::StrictMock;
namespace media {
-class MockAudioSourceCallback : public AudioOutputStream::AudioSourceCallback {
- public:
- MOCK_METHOD2(OnMoreData, int(AudioBus* audio_bus,
- AudioBuffersState buffers_state));
- MOCK_METHOD3(OnMoreIOData, int(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state));
- MOCK_METHOD1(OnError, void(AudioOutputStream* stream));
-};
-
class MockAudioManagerCras : public AudioManagerCras {
public:
+ MockAudioManagerCras() : AudioManagerCras(&fake_audio_log_factory_) {}
+
MOCK_METHOD0(Init, void());
MOCK_METHOD0(HasAudioOutputDevices, bool());
MOCK_METHOD0(HasAudioInputDevices, bool());
MOCK_METHOD1(MakeLinearOutputStream, AudioOutputStream*(
const AudioParameters& params));
- MOCK_METHOD1(MakeLowLatencyOutputStream, AudioOutputStream*(
- const AudioParameters& params));
+ MOCK_METHOD2(MakeLowLatencyOutputStream,
+ AudioOutputStream*(const AudioParameters& params,
+ const std::string& device_id));
MOCK_METHOD2(MakeLinearOutputStream, AudioInputStream*(
const AudioParameters& params, const std::string& device_id));
MOCK_METHOD2(MakeLowLatencyInputStream, AudioInputStream*(
@@ -53,6 +53,9 @@ class MockAudioManagerCras : public AudioManagerCras {
DCHECK(stream);
delete stream;
}
+
+ private:
+ FakeAudioLogFactory fake_audio_log_factory_;
};
class CrasUnifiedStreamTest : public testing::Test {
diff --git a/chromium/media/audio/fake_audio_consumer.cc b/chromium/media/audio/fake_audio_consumer.cc
index 55c439ad9f3..ca99424f419 100644
--- a/chromium/media/audio/fake_audio_consumer.cc
+++ b/chromium/media/audio/fake_audio_consumer.cc
@@ -7,10 +7,10 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/cancelable_callback.h"
+#include "base/location.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
-#include "base/message_loop/message_loop.h"
-#include "base/message_loop/message_loop_proxy.h"
+#include "base/single_thread_task_runner.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread_checker.h"
#include "base/time/time.h"
@@ -22,7 +22,7 @@ namespace media {
class FakeAudioConsumer::Worker
: public base::RefCountedThreadSafe<FakeAudioConsumer::Worker> {
public:
- Worker(const scoped_refptr<base::MessageLoopProxy>& worker_loop,
+ Worker(const scoped_refptr<base::SingleThreadTaskRunner>& worker_task_runner,
const AudioParameters& params);
bool IsStopped();
@@ -44,7 +44,7 @@ class FakeAudioConsumer::Worker
// the worker loop.
void DoRead();
- const scoped_refptr<base::MessageLoopProxy> worker_loop_;
+ const scoped_refptr<base::SingleThreadTaskRunner> worker_task_runner_;
const scoped_ptr<AudioBus> audio_bus_;
const base::TimeDelta buffer_duration_;
@@ -61,9 +61,9 @@ class FakeAudioConsumer::Worker
};
FakeAudioConsumer::FakeAudioConsumer(
- const scoped_refptr<base::MessageLoopProxy>& worker_loop,
+ const scoped_refptr<base::SingleThreadTaskRunner>& worker_task_runner,
const AudioParameters& params)
- : worker_(new Worker(worker_loop, params)) {
+ : worker_(new Worker(worker_task_runner, params)) {
}
FakeAudioConsumer::~FakeAudioConsumer() {
@@ -80,9 +80,9 @@ void FakeAudioConsumer::Stop() {
}
FakeAudioConsumer::Worker::Worker(
- const scoped_refptr<base::MessageLoopProxy>& worker_loop,
+ const scoped_refptr<base::SingleThreadTaskRunner>& worker_task_runner,
const AudioParameters& params)
- : worker_loop_(worker_loop),
+ : worker_task_runner_(worker_task_runner),
audio_bus_(AudioBus::Create(params)),
buffer_duration_(base::TimeDelta::FromMicroseconds(
params.frames_per_buffer() * base::Time::kMicrosecondsPerSecond /
@@ -111,11 +111,11 @@ void FakeAudioConsumer::Worker::Start(const ReadCB& read_cb) {
DCHECK(read_cb_.is_null());
read_cb_ = read_cb;
}
- worker_loop_->PostTask(FROM_HERE, base::Bind(&Worker::DoStart, this));
+ worker_task_runner_->PostTask(FROM_HERE, base::Bind(&Worker::DoStart, this));
}
void FakeAudioConsumer::Worker::DoStart() {
- DCHECK(worker_loop_->BelongsToCurrentThread());
+ DCHECK(worker_task_runner_->BelongsToCurrentThread());
next_read_time_ = base::TimeTicks::Now();
read_task_cb_.Reset(base::Bind(&Worker::DoRead, this));
read_task_cb_.callback().Run();
@@ -129,16 +129,16 @@ void FakeAudioConsumer::Worker::Stop() {
return;
read_cb_.Reset();
}
- worker_loop_->PostTask(FROM_HERE, base::Bind(&Worker::DoCancel, this));
+ worker_task_runner_->PostTask(FROM_HERE, base::Bind(&Worker::DoCancel, this));
}
void FakeAudioConsumer::Worker::DoCancel() {
- DCHECK(worker_loop_->BelongsToCurrentThread());
+ DCHECK(worker_task_runner_->BelongsToCurrentThread());
read_task_cb_.Cancel();
}
void FakeAudioConsumer::Worker::DoRead() {
- DCHECK(worker_loop_->BelongsToCurrentThread());
+ DCHECK(worker_task_runner_->BelongsToCurrentThread());
{
base::AutoLock scoped_lock(read_cb_lock_);
@@ -156,7 +156,8 @@ void FakeAudioConsumer::Worker::DoRead() {
delay += buffer_duration_ * (-delay / buffer_duration_ + 1);
next_read_time_ = now + delay;
- worker_loop_->PostDelayedTask(FROM_HERE, read_task_cb_.callback(), delay);
+ worker_task_runner_->PostDelayedTask(
+ FROM_HERE, read_task_cb_.callback(), delay);
}
} // namespace media
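The worker above now posts its periodic reads on a SingleThreadTaskRunner and, when it falls behind, skips ahead by whole buffer periods instead of queueing a backlog. A standalone sketch of that catch-up arithmetic, under the assumption that the next read is normally due one period after the previously scheduled one (the surrounding Chromium code is not reproduced here):

// periodic_read_sketch.cc -- "skip whole periods when behind schedule"
// arithmetic, analogous to the fake consumer worker's rescheduling.
#include <chrono>
#include <cstdio>

using Clock = std::chrono::steady_clock;
using Ms = std::chrono::milliseconds;

// Given the previously scheduled read time and the current time, returns the
// delay until the next read. If we are late, jump forward by whole periods so
// callbacks stay aligned to the period instead of piling up.
Ms NextDelay(Clock::time_point next_read_time, Clock::time_point now,
             Ms period) {
  Ms delay = std::chrono::duration_cast<Ms>(next_read_time + period - now);
  if (delay < Ms(0)) {
    // Number of whole periods we are behind, plus one to land in the future.
    auto periods_behind = (-delay) / period + 1;
    delay += period * periods_behind;
  }
  return delay;
}

int main() {
  const Ms period(10);
  Clock::time_point now = Clock::now();
  // On time: the next read is one period out.
  std::printf("on time: %lld ms\n",
              static_cast<long long>(NextDelay(now, now, period).count()));
  // 35 ms behind: skip whole periods and land 5 ms in the future.
  std::printf("late:    %lld ms\n",
              static_cast<long long>(
                  NextDelay(now - Ms(45), now, period).count()));
  return 0;
}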
diff --git a/chromium/media/audio/fake_audio_consumer.h b/chromium/media/audio/fake_audio_consumer.h
index 50373565d00..18c552ad97b 100644
--- a/chromium/media/audio/fake_audio_consumer.h
+++ b/chromium/media/audio/fake_audio_consumer.h
@@ -10,7 +10,7 @@
#include "media/base/media_export.h"
namespace base {
-class MessageLoopProxy;
+class SingleThreadTaskRunner;
}
namespace media {
@@ -21,12 +21,13 @@ class AudioParameters;
// simulate a real time consumer of audio data.
class MEDIA_EXPORT FakeAudioConsumer {
public:
- // |worker_loop| is the loop on which the ReadCB provided to Start() will be
- // executed on. This may or may not be the be for the same thread that
- // invokes the Start/Stop methods.
+ // |worker_task_runner| is the task runner on which the ReadCB provided to
+ // Start() will be executed. This may or may not be the same thread that
+ // invokes the Start/Stop methods.
// |params| is used to determine the frequency of callbacks.
- FakeAudioConsumer(const scoped_refptr<base::MessageLoopProxy>& worker_loop,
- const AudioParameters& params);
+ FakeAudioConsumer(
+ const scoped_refptr<base::SingleThreadTaskRunner>& worker_task_runner,
+ const AudioParameters& params);
~FakeAudioConsumer();
// Start executing |read_cb| at regular intervals. Stop() must be called by
diff --git a/chromium/media/audio/fake_audio_input_stream.cc b/chromium/media/audio/fake_audio_input_stream.cc
index a00a9b62001..384adcb411c 100644
--- a/chromium/media/audio/fake_audio_input_stream.cc
+++ b/chromium/media/audio/fake_audio_input_stream.cc
@@ -7,6 +7,7 @@
#include "base/bind.h"
#include "base/lazy_instance.h"
#include "media/audio/audio_manager_base.h"
+#include "media/base/audio_bus.h"
using base::TimeTicks;
using base::TimeDelta;
@@ -20,10 +21,16 @@ namespace {
const int kBeepDurationMilliseconds = 20;
const int kBeepFrequency = 400;
+// Interval between two automatic beeps.
+const int kAutomaticBeepIntervalInMs = 500;
+
+// Automatic beep will be triggered every |kAutomaticBeepIntervalInMs| unless
+// users explicitly call BeepOnce(), which will disable the automatic beep.
struct BeepContext {
- BeepContext() : beep_once(false) {}
+ BeepContext() : beep_once(false), automatic(true) {}
base::Lock beep_lock;
bool beep_once;
+ bool automatic;
};
static base::LazyInstance<BeepContext> g_beep_context =
@@ -42,17 +49,20 @@ FakeAudioInputStream::FakeAudioInputStream(AudioManagerBase* manager,
: audio_manager_(manager),
callback_(NULL),
buffer_size_((params.channels() * params.bits_per_sample() *
- params.frames_per_buffer()) / 8),
+ params.frames_per_buffer()) /
+ 8),
params_(params),
thread_("FakeAudioRecordingThread"),
callback_interval_(base::TimeDelta::FromMilliseconds(
(params.frames_per_buffer() * 1000) / params.sample_rate())),
- beep_duration_in_buffers_(
- kBeepDurationMilliseconds * params.sample_rate() /
- params.frames_per_buffer() / 1000),
+ beep_duration_in_buffers_(kBeepDurationMilliseconds *
+ params.sample_rate() /
+ params.frames_per_buffer() /
+ 1000),
beep_generated_in_buffers_(0),
beep_period_in_frames_(params.sample_rate() / kBeepFrequency),
- frames_elapsed_(0) {
+ frames_elapsed_(0),
+ audio_bus_(AudioBus::Create(params)) {
}
FakeAudioInputStream::~FakeAudioInputStream() {}
@@ -60,11 +70,13 @@ FakeAudioInputStream::~FakeAudioInputStream() {}
bool FakeAudioInputStream::Open() {
buffer_.reset(new uint8[buffer_size_]);
memset(buffer_.get(), 0, buffer_size_);
+ audio_bus_->Zero();
return true;
}
void FakeAudioInputStream::Start(AudioInputCallback* callback) {
DCHECK(!thread_.IsRunning());
+ DCHECK(!callback_);
callback_ = callback;
last_callback_time_ = TimeTicks::Now();
thread_.Start();
@@ -77,14 +89,37 @@ void FakeAudioInputStream::Start(AudioInputCallback* callback) {
void FakeAudioInputStream::DoCallback() {
DCHECK(callback_);
+ const TimeTicks now = TimeTicks::Now();
+ base::TimeDelta next_callback_time =
+ last_callback_time_ + callback_interval_ * 2 - now;
+
+ // If we are falling behind, try to catch up as much as we can in the next
+ // callback.
+ if (next_callback_time < base::TimeDelta())
+ next_callback_time = base::TimeDelta();
+
+ // Accumulate the time from the last beep.
+ interval_from_last_beep_ += now - last_callback_time_;
+
+ last_callback_time_ = now;
+
memset(buffer_.get(), 0, buffer_size_);
bool should_beep = false;
{
BeepContext* beep_context = g_beep_context.Pointer();
base::AutoLock auto_lock(beep_context->beep_lock);
- should_beep = beep_context->beep_once;
- beep_context->beep_once = false;
+ if (beep_context->automatic) {
+ base::TimeDelta delta = interval_from_last_beep_ -
+ TimeDelta::FromMilliseconds(kAutomaticBeepIntervalInMs);
+ if (delta > base::TimeDelta()) {
+ should_beep = true;
+ interval_from_last_beep_ = delta;
+ }
+ } else {
+ should_beep = beep_context->beep_once;
+ beep_context->beep_once = false;
+ }
}
// If this object was instructed to generate a beep or has started to
@@ -102,7 +137,6 @@ void FakeAudioInputStream::DoCallback() {
while (position + high_bytes <= buffer_size_) {
// Write high values first.
memset(buffer_.get() + position, 128, high_bytes);
-
// Then leave low values in the buffer with |high_bytes|.
position += high_bytes * 2;
}
@@ -112,19 +146,11 @@ void FakeAudioInputStream::DoCallback() {
beep_generated_in_buffers_ = 0;
}
- callback_->OnData(this, buffer_.get(), buffer_size_, buffer_size_, 1.0);
+ audio_bus_->FromInterleaved(
+ buffer_.get(), audio_bus_->frames(), params_.bits_per_sample() / 8);
+ callback_->OnData(this, audio_bus_.get(), buffer_size_, 1.0);
frames_elapsed_ += params_.frames_per_buffer();
- const TimeTicks now = TimeTicks::Now();
- base::TimeDelta next_callback_time =
- last_callback_time_ + callback_interval_ * 2 - now;
-
- // If we are falling behind, try to catch up as much as we can in the next
- // callback.
- if (next_callback_time < base::TimeDelta())
- next_callback_time = base::TimeDelta();
-
- last_callback_time_ = now;
thread_.message_loop()->PostDelayedTask(
FROM_HERE,
base::Bind(&FakeAudioInputStream::DoCallback, base::Unretained(this)),
@@ -133,13 +159,10 @@ void FakeAudioInputStream::DoCallback() {
void FakeAudioInputStream::Stop() {
thread_.Stop();
+ callback_ = NULL;
}
void FakeAudioInputStream::Close() {
- if (callback_) {
- callback_->OnClose(this);
- callback_ = NULL;
- }
audio_manager_->ReleaseInputStream(this);
}
@@ -165,6 +188,7 @@ void FakeAudioInputStream::BeepOnce() {
BeepContext* beep_context = g_beep_context.Pointer();
base::AutoLock auto_lock(beep_context->beep_lock);
beep_context->beep_once = true;
+ beep_context->automatic = false;
}
} // namespace media
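The fake input stream now beeps on its own every kAutomaticBeepIntervalInMs until BeepOnce() switches it to manual mode; the timing works by accumulating the elapsed time per callback and carrying the overshoot forward. A standalone sketch of that accumulator, with illustrative names:

// auto_beep_sketch.cc -- fire a beep roughly every fixed interval from
// per-callback elapsed times, carrying the remainder so beeps stay periodic.
#include <chrono>
#include <cstdio>

using Ms = std::chrono::milliseconds;

class AutoBeeper {
 public:
  explicit AutoBeeper(Ms interval) : interval_(interval) {}

  // Called once per capture callback with the time elapsed since the previous
  // callback. Returns true if this buffer should contain a beep.
  bool OnCallback(Ms elapsed) {
    accumulated_ += elapsed;
    Ms overshoot = accumulated_ - interval_;
    if (overshoot <= Ms(0))
      return false;
    accumulated_ = overshoot;  // Carry the remainder forward.
    return true;
  }

 private:
  const Ms interval_;
  Ms accumulated_{0};
};

int main() {
  AutoBeeper beeper(Ms(500));  // Beep roughly every 500 ms, as in the patch.
  // Simulate 10 ms capture callbacks for two seconds.
  int beeps = 0;
  for (int i = 0; i < 200; ++i)
    beeps += beeper.OnCallback(Ms(10)) ? 1 : 0;
  // Prints 3: the check is strictly greater-than, so the first beep lands one
  // callback after the 500 ms mark, then every 500 ms after that.
  std::printf("beeps in 2 s: %d\n", beeps);
  return 0;
}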
diff --git a/chromium/media/audio/fake_audio_input_stream.h b/chromium/media/audio/fake_audio_input_stream.h
index 5879ab39763..e6c625e6b3a 100644
--- a/chromium/media/audio/fake_audio_input_stream.h
+++ b/chromium/media/audio/fake_audio_input_stream.h
@@ -18,6 +18,7 @@
namespace media {
+class AudioBus;
class AudioManagerBase;
class MEDIA_EXPORT FakeAudioInputStream
@@ -63,10 +64,12 @@ class MEDIA_EXPORT FakeAudioInputStream
base::Thread thread_;
base::TimeTicks last_callback_time_;
base::TimeDelta callback_interval_;
+ base::TimeDelta interval_from_last_beep_;
int beep_duration_in_buffers_;
int beep_generated_in_buffers_;
int beep_period_in_frames_;
int frames_elapsed_;
+ scoped_ptr<media::AudioBus> audio_bus_;
DISALLOW_COPY_AND_ASSIGN(FakeAudioInputStream);
};
diff --git a/chromium/media/audio/fake_audio_log_factory.cc b/chromium/media/audio/fake_audio_log_factory.cc
index 6f752e559fd..5e2d134520c 100644
--- a/chromium/media/audio/fake_audio_log_factory.cc
+++ b/chromium/media/audio/fake_audio_log_factory.cc
@@ -12,8 +12,7 @@ class FakeAudioLogImpl : public AudioLog {
virtual ~FakeAudioLogImpl() {}
virtual void OnCreated(int component_id,
const media::AudioParameters& params,
- const std::string& input_device_id,
- const std::string& output_device_id) OVERRIDE {}
+ const std::string& device_id) OVERRIDE {}
virtual void OnStarted(int component_id) OVERRIDE {}
virtual void OnStopped(int component_id) OVERRIDE {}
virtual void OnClosed(int component_id) OVERRIDE {}
diff --git a/chromium/media/audio/fake_audio_manager.cc b/chromium/media/audio/fake_audio_manager.cc
index bfe9a0a7ff3..e5d9bd4d8c8 100644
--- a/chromium/media/audio/fake_audio_manager.cc
+++ b/chromium/media/audio/fake_audio_manager.cc
@@ -33,8 +33,7 @@ AudioOutputStream* FakeAudioManager::MakeLinearOutputStream(
AudioOutputStream* FakeAudioManager::MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) {
+ const std::string& device_id) {
return FakeAudioOutputStream::MakeFakeStream(this, params);
}
diff --git a/chromium/media/audio/fake_audio_manager.h b/chromium/media/audio/fake_audio_manager.h
index b5c45201ed1..9fbf140c6c0 100644
--- a/chromium/media/audio/fake_audio_manager.h
+++ b/chromium/media/audio/fake_audio_manager.h
@@ -26,8 +26,7 @@ class MEDIA_EXPORT FakeAudioManager : public AudioManagerBase {
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) OVERRIDE;
+ const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(const AudioParameters& params,
const std::string& device_id)
OVERRIDE;
diff --git a/chromium/media/audio/fake_audio_output_stream.cc b/chromium/media/audio/fake_audio_output_stream.cc
index fb460ab6805..0448c23f8b7 100644
--- a/chromium/media/audio/fake_audio_output_stream.cc
+++ b/chromium/media/audio/fake_audio_output_stream.cc
@@ -7,7 +7,7 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/logging.h"
-#include "base/message_loop/message_loop.h"
+#include "base/single_thread_task_runner.h"
#include "media/audio/audio_manager_base.h"
namespace media {
@@ -22,7 +22,7 @@ FakeAudioOutputStream::FakeAudioOutputStream(AudioManagerBase* manager,
const AudioParameters& params)
: audio_manager_(manager),
callback_(NULL),
- fake_consumer_(manager->GetWorkerLoop(), params) {
+ fake_consumer_(manager->GetWorkerTaskRunner(), params) {
}
FakeAudioOutputStream::~FakeAudioOutputStream() {
@@ -30,26 +30,26 @@ FakeAudioOutputStream::~FakeAudioOutputStream() {
}
bool FakeAudioOutputStream::Open() {
- DCHECK(audio_manager_->GetMessageLoop()->BelongsToCurrentThread());
+ DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
return true;
}
void FakeAudioOutputStream::Start(AudioSourceCallback* callback) {
- DCHECK(audio_manager_->GetMessageLoop()->BelongsToCurrentThread());
+ DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
callback_ = callback;
fake_consumer_.Start(base::Bind(
&FakeAudioOutputStream::CallOnMoreData, base::Unretained(this)));
}
void FakeAudioOutputStream::Stop() {
- DCHECK(audio_manager_->GetMessageLoop()->BelongsToCurrentThread());
+ DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
fake_consumer_.Stop();
callback_ = NULL;
}
void FakeAudioOutputStream::Close() {
DCHECK(!callback_);
- DCHECK(audio_manager_->GetMessageLoop()->BelongsToCurrentThread());
+ DCHECK(audio_manager_->GetTaskRunner()->BelongsToCurrentThread());
audio_manager_->ReleaseOutputStream(this);
}
@@ -60,7 +60,7 @@ void FakeAudioOutputStream::GetVolume(double* volume) {
};
void FakeAudioOutputStream::CallOnMoreData(AudioBus* audio_bus) {
- DCHECK(audio_manager_->GetWorkerLoop()->BelongsToCurrentThread());
+ DCHECK(audio_manager_->GetWorkerTaskRunner()->BelongsToCurrentThread());
callback_->OnMoreData(audio_bus, AudioBuffersState());
}
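The DCHECKs above pin Open()/Start()/Stop()/Close() to the manager's task runner and the render callback to the worker task runner. Stripped of the Chromium plumbing, the underlying idea is a plain thread-affinity assertion, sketched below with standard C++ only (the FakeStream name is illustrative):

// thread_affinity_sketch.cc -- remember which thread an object is bound to
// and verify later calls arrive on that same thread.
#include <cassert>
#include <thread>

class ThreadAffinity {
 public:
  ThreadAffinity() : bound_thread_(std::this_thread::get_id()) {}
  bool CalledOnBoundThread() const {
    return std::this_thread::get_id() == bound_thread_;
  }
 private:
  const std::thread::id bound_thread_;
};

class FakeStream {
 public:
  void Open() { assert(affinity_.CalledOnBoundThread()); }
  void Start() { assert(affinity_.CalledOnBoundThread()); }
 private:
  ThreadAffinity affinity_;  // Bound to the constructing (manager) thread.
};

int main() {
  FakeStream stream;
  stream.Open();   // OK: same thread that constructed the stream.
  stream.Start();  // OK.
  // Calling from another thread would trip the assertion in a debug build:
  // std::thread([&] { stream.Open(); }).join();
  return 0;
}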
diff --git a/chromium/media/audio/linux/audio_manager_linux.cc b/chromium/media/audio/linux/audio_manager_linux.cc
index eaeb2f332b9..e7824b4d6c8 100644
--- a/chromium/media/audio/linux/audio_manager_linux.cc
+++ b/chromium/media/audio/linux/audio_manager_linux.cc
@@ -23,13 +23,13 @@ enum LinuxAudioIO {
kPulse,
kAlsa,
kCras,
- kAudioIOMax // Must always be last!
+ kAudioIOMax = kCras // Must always be equal to largest logged entry.
};
AudioManager* CreateAudioManager(AudioLogFactory* audio_log_factory) {
#if defined(USE_CRAS)
if (CommandLine::ForCurrentProcess()->HasSwitch(switches::kUseCras)) {
- UMA_HISTOGRAM_ENUMERATION("Media.LinuxAudioIO", kCras, kAudioIOMax);
+ UMA_HISTOGRAM_ENUMERATION("Media.LinuxAudioIO", kCras, kAudioIOMax + 1);
return new AudioManagerCras(audio_log_factory);
}
#endif
@@ -37,13 +37,13 @@ AudioManager* CreateAudioManager(AudioLogFactory* audio_log_factory) {
#if defined(USE_PULSEAUDIO)
AudioManager* manager = AudioManagerPulse::Create(audio_log_factory);
if (manager) {
- UMA_HISTOGRAM_ENUMERATION("Media.LinuxAudioIO", kPulse, kAudioIOMax);
+ UMA_HISTOGRAM_ENUMERATION("Media.LinuxAudioIO", kPulse, kAudioIOMax + 1);
return manager;
}
#endif
#if defined(USE_ALSA)
- UMA_HISTOGRAM_ENUMERATION("Media.LinuxAudioIO", kAlsa, kAudioIOMax);
+ UMA_HISTOGRAM_ENUMERATION("Media.LinuxAudioIO", kAlsa, kAudioIOMax + 1);
return new AudioManagerAlsa(audio_log_factory);
#else
return new FakeAudioManager(audio_log_factory);
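The histogram change above switches the enum to the convention where the max enumerator aliases the largest value actually logged and callers pass max + 1 as the exclusive boundary. A standalone sketch of why every legal sample must stay strictly below the boundary; RecordEnumeration is a stand-in for illustration, not the UMA macro:

// histogram_boundary_sketch.cc -- exclusive-boundary enumeration logging.
#include <cassert>
#include <cstdio>

enum LinuxAudioIO {
  kPulse,
  kAlsa,
  kCras,
  kAudioIOMax = kCras  // Must track the largest logged entry.
};

// Stand-in for UMA_HISTOGRAM_ENUMERATION(name, sample, boundary): records
// |sample| into one of |boundary| buckets, i.e. the range [0, boundary).
void RecordEnumeration(const char* name, int sample, int boundary) {
  assert(sample >= 0 && sample < boundary);
  std::printf("%s: bucket %d of %d\n", name, sample, boundary);
}

int main() {
  // Every enumerator, including the largest, is a valid sample.
  RecordEnumeration("Media.LinuxAudioIO", kPulse, kAudioIOMax + 1);
  RecordEnumeration("Media.LinuxAudioIO", kCras, kAudioIOMax + 1);
  return 0;
}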
diff --git a/chromium/media/audio/mac/aggregate_device_manager.cc b/chromium/media/audio/mac/aggregate_device_manager.cc
deleted file mode 100644
index c7f323322e7..00000000000
--- a/chromium/media/audio/mac/aggregate_device_manager.cc
+++ /dev/null
@@ -1,371 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/mac/aggregate_device_manager.h"
-
-#include <CoreAudio/AudioHardware.h>
-#include <string>
-
-#include "base/mac/mac_logging.h"
-#include "base/mac/scoped_cftyperef.h"
-#include "media/audio/audio_parameters.h"
-#include "media/audio/mac/audio_manager_mac.h"
-
-using base::ScopedCFTypeRef;
-
-namespace media {
-
-AggregateDeviceManager::AggregateDeviceManager()
- : plugin_id_(kAudioObjectUnknown),
- input_device_(kAudioDeviceUnknown),
- output_device_(kAudioDeviceUnknown),
- aggregate_device_(kAudioObjectUnknown) {
-}
-
-AggregateDeviceManager::~AggregateDeviceManager() {
- DestroyAggregateDevice();
-}
-
-AudioDeviceID AggregateDeviceManager::GetDefaultAggregateDevice() {
- AudioDeviceID current_input_device;
- AudioDeviceID current_output_device;
- AudioManagerMac::GetDefaultInputDevice(&current_input_device);
- AudioManagerMac::GetDefaultOutputDevice(&current_output_device);
-
- if (AudioManagerMac::HardwareSampleRateForDevice(current_input_device) !=
- AudioManagerMac::HardwareSampleRateForDevice(current_output_device)) {
- // TODO(crogers): with some extra work we can make aggregate devices work
- // if the clock domain is the same but the sample-rate differ.
- // For now we fallback to the synchronized path.
- return kAudioDeviceUnknown;
- }
-
- // Use a lazily created aggregate device if it's already available
- // and still appropriate.
- if (aggregate_device_ != kAudioObjectUnknown) {
- // TODO(crogers): handle default device changes for synchronized I/O.
- // For now, we check to make sure the default devices haven't changed
- // since we lazily created the aggregate device.
- if (current_input_device == input_device_ &&
- current_output_device == output_device_)
- return aggregate_device_;
-
- // For now, once lazily created don't attempt to create another
- // aggregate device.
- return kAudioDeviceUnknown;
- }
-
- input_device_ = current_input_device;
- output_device_ = current_output_device;
-
- // Only create an aggregrate device if the clock domains match.
- UInt32 input_clockdomain = GetClockDomain(input_device_);
- UInt32 output_clockdomain = GetClockDomain(output_device_);
- DVLOG(1) << "input_clockdomain: " << input_clockdomain;
- DVLOG(1) << "output_clockdomain: " << output_clockdomain;
-
- if (input_clockdomain == 0 || input_clockdomain != output_clockdomain)
- return kAudioDeviceUnknown;
-
- OSStatus result = CreateAggregateDevice(
- input_device_,
- output_device_,
- &aggregate_device_);
- if (result != noErr)
- DestroyAggregateDevice();
-
- return aggregate_device_;
-}
-
-CFStringRef AggregateDeviceManager::GetDeviceUID(AudioDeviceID id) {
- static const AudioObjectPropertyAddress kDeviceUIDAddress = {
- kAudioDevicePropertyDeviceUID,
- kAudioObjectPropertyScopeGlobal,
- kAudioObjectPropertyElementMaster
- };
-
- // As stated in the CoreAudio header (AudioHardwareBase.h),
- // the caller is responsible for releasing the device_UID.
- CFStringRef device_UID;
- UInt32 size = sizeof(device_UID);
- OSStatus result = AudioObjectGetPropertyData(
- id,
- &kDeviceUIDAddress,
- 0,
- 0,
- &size,
- &device_UID);
-
- return (result == noErr) ? device_UID : NULL;
-}
-
-void AggregateDeviceManager::GetDeviceName(
- AudioDeviceID id, char* name, UInt32 size) {
- static const AudioObjectPropertyAddress kDeviceNameAddress = {
- kAudioDevicePropertyDeviceName,
- kAudioObjectPropertyScopeGlobal,
- kAudioObjectPropertyElementMaster
- };
-
- OSStatus result = AudioObjectGetPropertyData(
- id,
- &kDeviceNameAddress,
- 0,
- 0,
- &size,
- name);
-
- if (result != noErr && size > 0)
- name[0] = 0;
-}
-
-UInt32 AggregateDeviceManager::GetClockDomain(AudioDeviceID device_id) {
- static const AudioObjectPropertyAddress kClockDomainAddress = {
- kAudioDevicePropertyClockDomain,
- kAudioObjectPropertyScopeGlobal,
- kAudioObjectPropertyElementMaster
- };
-
- UInt32 clockdomain = 0;
- UInt32 size = sizeof(UInt32);
- OSStatus result = AudioObjectGetPropertyData(
- device_id,
- &kClockDomainAddress,
- 0,
- 0,
- &size,
- &clockdomain);
-
- return (result == noErr) ? clockdomain : 0;
-}
-
-OSStatus AggregateDeviceManager::GetPluginID(AudioObjectID* id) {
- DCHECK(id);
-
- // Get the audio hardware plugin.
- CFStringRef bundle_name = CFSTR("com.apple.audio.CoreAudio");
-
- AudioValueTranslation plugin_translation;
- plugin_translation.mInputData = &bundle_name;
- plugin_translation.mInputDataSize = sizeof(bundle_name);
- plugin_translation.mOutputData = id;
- plugin_translation.mOutputDataSize = sizeof(*id);
-
- static const AudioObjectPropertyAddress kPlugInAddress = {
- kAudioHardwarePropertyPlugInForBundleID,
- kAudioObjectPropertyScopeGlobal,
- kAudioObjectPropertyElementMaster
- };
-
- UInt32 size = sizeof(plugin_translation);
- OSStatus result = AudioObjectGetPropertyData(
- kAudioObjectSystemObject,
- &kPlugInAddress,
- 0,
- 0,
- &size,
- &plugin_translation);
-
- DVLOG(1) << "CoreAudio plugin ID: " << *id;
-
- return result;
-}
-
-CFMutableDictionaryRef
-AggregateDeviceManager::CreateAggregateDeviceDictionary(
- AudioDeviceID input_id,
- AudioDeviceID output_id) {
- CFMutableDictionaryRef aggregate_device_dict = CFDictionaryCreateMutable(
- NULL,
- 0,
- &kCFTypeDictionaryKeyCallBacks,
- &kCFTypeDictionaryValueCallBacks);
- if (!aggregate_device_dict)
- return NULL;
-
- const CFStringRef kAggregateDeviceName =
- CFSTR("ChromeAggregateAudioDevice");
- const CFStringRef kAggregateDeviceUID =
- CFSTR("com.google.chrome.AggregateAudioDevice");
-
- // Add name and UID of the device to the dictionary.
- CFDictionaryAddValue(
- aggregate_device_dict,
- CFSTR(kAudioAggregateDeviceNameKey),
- kAggregateDeviceName);
- CFDictionaryAddValue(
- aggregate_device_dict,
- CFSTR(kAudioAggregateDeviceUIDKey),
- kAggregateDeviceUID);
-
- // Add a "private aggregate key" to the dictionary.
- // The 1 value means that the created aggregate device will
- // only be accessible from the process that created it, and
- // won't be visible to outside processes.
- int value = 1;
- ScopedCFTypeRef<CFNumberRef> aggregate_device_number(CFNumberCreate(
- NULL,
- kCFNumberIntType,
- &value));
- CFDictionaryAddValue(
- aggregate_device_dict,
- CFSTR(kAudioAggregateDeviceIsPrivateKey),
- aggregate_device_number);
-
- return aggregate_device_dict;
-}
-
-CFMutableArrayRef
-AggregateDeviceManager::CreateSubDeviceArray(
- CFStringRef input_device_UID, CFStringRef output_device_UID) {
- CFMutableArrayRef sub_devices_array = CFArrayCreateMutable(
- NULL,
- 0,
- &kCFTypeArrayCallBacks);
-
- CFArrayAppendValue(sub_devices_array, input_device_UID);
- CFArrayAppendValue(sub_devices_array, output_device_UID);
-
- return sub_devices_array;
-}
-
-OSStatus AggregateDeviceManager::CreateAggregateDevice(
- AudioDeviceID input_id,
- AudioDeviceID output_id,
- AudioDeviceID* aggregate_device) {
- DCHECK(aggregate_device);
-
- const size_t kMaxDeviceNameLength = 256;
-
- scoped_ptr<char[]> input_device_name(new char[kMaxDeviceNameLength]);
- GetDeviceName(
- input_id,
- input_device_name.get(),
- sizeof(input_device_name));
- DVLOG(1) << "Input device: \n" << input_device_name;
-
- scoped_ptr<char[]> output_device_name(new char[kMaxDeviceNameLength]);
- GetDeviceName(
- output_id,
- output_device_name.get(),
- sizeof(output_device_name));
- DVLOG(1) << "Output device: \n" << output_device_name;
-
- OSStatus result = GetPluginID(&plugin_id_);
- if (result != noErr)
- return result;
-
- // Create a dictionary for the aggregate device.
- ScopedCFTypeRef<CFMutableDictionaryRef> aggregate_device_dict(
- CreateAggregateDeviceDictionary(input_id, output_id));
- if (!aggregate_device_dict)
- return -1;
-
- // Create the aggregate device.
- static const AudioObjectPropertyAddress kCreateAggregateDeviceAddress = {
- kAudioPlugInCreateAggregateDevice,
- kAudioObjectPropertyScopeGlobal,
- kAudioObjectPropertyElementMaster
- };
-
- UInt32 size = sizeof(*aggregate_device);
- result = AudioObjectGetPropertyData(
- plugin_id_,
- &kCreateAggregateDeviceAddress,
- sizeof(aggregate_device_dict),
- &aggregate_device_dict,
- &size,
- aggregate_device);
- if (result != noErr) {
- DLOG(ERROR) << "Error creating aggregate audio device!";
- return result;
- }
-
- // Set the sub-devices for the aggregate device.
- // In this case we use two: the input and output devices.
-
- ScopedCFTypeRef<CFStringRef> input_device_UID(GetDeviceUID(input_id));
- ScopedCFTypeRef<CFStringRef> output_device_UID(GetDeviceUID(output_id));
- if (!input_device_UID || !output_device_UID) {
- DLOG(ERROR) << "Error getting audio device UID strings.";
- return -1;
- }
-
- ScopedCFTypeRef<CFMutableArrayRef> sub_devices_array(
- CreateSubDeviceArray(input_device_UID, output_device_UID));
- if (sub_devices_array == NULL) {
- DLOG(ERROR) << "Error creating sub-devices array.";
- return -1;
- }
-
- static const AudioObjectPropertyAddress kSetSubDevicesAddress = {
- kAudioAggregateDevicePropertyFullSubDeviceList,
- kAudioObjectPropertyScopeGlobal,
- kAudioObjectPropertyElementMaster
- };
-
- size = sizeof(CFMutableArrayRef);
- result = AudioObjectSetPropertyData(
- *aggregate_device,
- &kSetSubDevicesAddress,
- 0,
- NULL,
- size,
- &sub_devices_array);
- if (result != noErr) {
- DLOG(ERROR) << "Error setting aggregate audio device sub-devices!";
- return result;
- }
-
- // Use the input device as the master device.
- static const AudioObjectPropertyAddress kSetMasterDeviceAddress = {
- kAudioAggregateDevicePropertyMasterSubDevice,
- kAudioObjectPropertyScopeGlobal,
- kAudioObjectPropertyElementMaster
- };
-
- size = sizeof(CFStringRef);
- result = AudioObjectSetPropertyData(
- *aggregate_device,
- &kSetMasterDeviceAddress,
- 0,
- NULL,
- size,
- &input_device_UID);
- if (result != noErr) {
- DLOG(ERROR) << "Error setting aggregate audio device master device!";
- return result;
- }
-
- DVLOG(1) << "New aggregate device: " << *aggregate_device;
- return noErr;
-}
-
-void AggregateDeviceManager::DestroyAggregateDevice() {
- if (aggregate_device_ == kAudioObjectUnknown)
- return;
-
- static const AudioObjectPropertyAddress kDestroyAddress = {
- kAudioPlugInDestroyAggregateDevice,
- kAudioObjectPropertyScopeGlobal,
- kAudioObjectPropertyElementMaster
- };
-
- UInt32 size = sizeof(aggregate_device_);
- OSStatus result = AudioObjectGetPropertyData(
- plugin_id_,
- &kDestroyAddress,
- 0,
- NULL,
- &size,
- &aggregate_device_);
- if (result != noErr) {
- DLOG(ERROR) << "Error destroying aggregate audio device!";
- return;
- }
-
- aggregate_device_ = kAudioObjectUnknown;
-}
-
-} // namespace media
diff --git a/chromium/media/audio/mac/aggregate_device_manager.h b/chromium/media/audio/mac/aggregate_device_manager.h
deleted file mode 100644
index 7b8b71ff655..00000000000
--- a/chromium/media/audio/mac/aggregate_device_manager.h
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_MAC_AGGREGATE_DEVICE_MANAGER_H_
-#define MEDIA_AUDIO_MAC_AGGREGATE_DEVICE_MANAGER_H_
-
-#include <CoreAudio/CoreAudio.h>
-
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "media/base/media_export.h"
-
-namespace media {
-
-class MEDIA_EXPORT AggregateDeviceManager {
- public:
- AggregateDeviceManager();
- ~AggregateDeviceManager();
-
- // Lazily creates an aggregate device based on the default
- // input and output devices.
- // It will either return a valid device or kAudioDeviceUnknown
- // if the default devices are not suitable for aggregate devices.
- AudioDeviceID GetDefaultAggregateDevice();
-
- private:
- // The caller is responsible for releasing the CFStringRef.
- static CFStringRef GetDeviceUID(AudioDeviceID id);
-
- static void GetDeviceName(AudioDeviceID id, char* name, UInt32 size);
- static UInt32 GetClockDomain(AudioDeviceID device_id);
- static OSStatus GetPluginID(AudioObjectID* id);
-
- CFMutableDictionaryRef CreateAggregateDeviceDictionary(
- AudioDeviceID input_id,
- AudioDeviceID output_id);
-
- CFMutableArrayRef CreateSubDeviceArray(CFStringRef input_device_UID,
- CFStringRef output_device_UID);
-
- OSStatus CreateAggregateDevice(AudioDeviceID input_id,
- AudioDeviceID output_id,
- AudioDeviceID* aggregate_device);
- void DestroyAggregateDevice();
-
- AudioObjectID plugin_id_;
- AudioDeviceID input_device_;
- AudioDeviceID output_device_;
-
- AudioDeviceID aggregate_device_;
-
- DISALLOW_COPY_AND_ASSIGN(AggregateDeviceManager);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_MAC_AGGREGATE_DEVICE_MANAGER_H_
diff --git a/chromium/media/audio/mac/audio_auhal_mac.cc b/chromium/media/audio/mac/audio_auhal_mac.cc
index 9fcd46a6a95..41fc57c553f 100644
--- a/chromium/media/audio/mac/audio_auhal_mac.cc
+++ b/chromium/media/audio/mac/audio_auhal_mac.cc
@@ -7,22 +7,17 @@
#include <CoreServices/CoreServices.h>
#include "base/basictypes.h"
+#include "base/bind.h"
+#include "base/bind_helpers.h"
#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "base/mac/mac_logging.h"
+#include "base/time/time.h"
#include "media/audio/mac/audio_manager_mac.h"
#include "media/base/audio_pull_fifo.h"
namespace media {
-static void ZeroBufferList(AudioBufferList* buffer_list) {
- for (size_t i = 0; i < buffer_list->mNumberBuffers; ++i) {
- memset(buffer_list->mBuffers[i].mData,
- 0,
- buffer_list->mBuffers[i].mDataByteSize);
- }
-}
-
static void WrapBufferList(AudioBufferList* buffer_list,
AudioBus* bus,
int frames) {
@@ -48,7 +43,6 @@ AUHALStream::AUHALStream(
AudioDeviceID device)
: manager_(manager),
params_(params),
- input_channels_(params_.input_channels()),
output_channels_(params_.channels()),
number_of_frames_(params_.frames_per_buffer()),
source_(NULL),
@@ -57,14 +51,12 @@ AUHALStream::AUHALStream(
volume_(1),
hardware_latency_frames_(0),
stopped_(false),
- input_buffer_list_(NULL),
current_hardware_pending_bytes_(0) {
// We must have a manager.
DCHECK(manager_);
VLOG(1) << "AUHALStream::AUHALStream()";
VLOG(1) << "Device: " << device;
- VLOG(1) << "Input channels: " << input_channels_;
VLOG(1) << "Output channels: " << output_channels_;
VLOG(1) << "Sample rate: " << params_.sample_rate();
VLOG(1) << "Buffer size: " << number_of_frames_;
@@ -74,27 +66,15 @@ AUHALStream::~AUHALStream() {
}
bool AUHALStream::Open() {
- // Get the total number of input and output channels that the
+ // Get the total number of output channels that the
// hardware supports.
- int device_input_channels;
- bool got_input_channels = AudioManagerMac::GetDeviceChannels(
- device_,
- kAudioDevicePropertyScopeInput,
- &device_input_channels);
-
int device_output_channels;
bool got_output_channels = AudioManagerMac::GetDeviceChannels(
device_,
kAudioDevicePropertyScopeOutput,
&device_output_channels);
- // Sanity check the requested I/O channels.
- if (!got_input_channels ||
- input_channels_ < 0 || input_channels_ > device_input_channels) {
- LOG(ERROR) << "AudioDevice does not support requested input channels.";
- return false;
- }
-
+ // Sanity check the requested output channels.
if (!got_output_channels ||
output_channels_ <= 0 || output_channels_ > device_output_channels) {
LOG(ERROR) << "AudioDevice does not support requested output channels.";
@@ -110,7 +90,10 @@ bool AUHALStream::Open() {
return false;
}
- CreateIOBusses();
+ // The output bus will wrap the AudioBufferList given to us in
+ // the Render() callback.
+ DCHECK_GT(output_channels_, 0);
+ output_bus_ = AudioBus::CreateWrapper(output_channels_);
bool configured = ConfigureAUHAL();
if (configured)
@@ -120,13 +103,6 @@ bool AUHALStream::Open() {
}
void AUHALStream::Close() {
- if (input_buffer_list_) {
- input_buffer_list_storage_.reset();
- input_buffer_list_ = NULL;
- input_bus_.reset(NULL);
- output_bus_.reset(NULL);
- }
-
if (audio_unit_) {
OSStatus result = AudioUnitUninitialize(audio_unit_);
OSSTATUS_DLOG_IF(ERROR, result != noErr, result)
@@ -148,6 +124,18 @@ void AUHALStream::Start(AudioSourceCallback* callback) {
return;
}
+ // Check if we should defer Start() for http://crbug.com/160920.
+ if (manager_->ShouldDeferStreamStart()) {
+ // Use a cancellable closure so that if Stop() is called before Start()
+ // actually runs, we can cancel the pending start.
+ deferred_start_cb_.Reset(
+ base::Bind(&AUHALStream::Start, base::Unretained(this), callback));
+ manager_->GetTaskRunner()->PostDelayedTask(
+ FROM_HERE, deferred_start_cb_.callback(), base::TimeDelta::FromSeconds(
+ AudioManagerMac::kStartDelayInSecsForPowerEvents));
+ return;
+ }
+
stopped_ = false;
audio_fifo_.reset();
{
@@ -165,6 +153,7 @@ void AUHALStream::Start(AudioSourceCallback* callback) {
}
void AUHALStream::Stop() {
+ deferred_start_cb_.Cancel();
if (stopped_)
return;
@@ -196,43 +185,25 @@ OSStatus AUHALStream::Render(
const AudioTimeStamp* output_time_stamp,
UInt32 bus_number,
UInt32 number_of_frames,
- AudioBufferList* io_data) {
+ AudioBufferList* data) {
TRACE_EVENT0("audio", "AUHALStream::Render");
// If the stream parameters change for any reason, we need to insert a FIFO
- // since the OnMoreData() pipeline can't handle frame size changes. Generally
- // this is a temporary situation which can occur after a device change has
- // occurred but the AudioManager hasn't received the notification yet.
+ // since the OnMoreData() pipeline can't handle frame size changes.
if (number_of_frames != number_of_frames_) {
// Create a FIFO on the fly to handle any discrepancies in callback rates.
if (!audio_fifo_) {
- VLOG(1) << "Audio frame size change detected; adding FIFO to compensate.";
+ VLOG(1) << "Audio frame size changed from " << number_of_frames_ << " to "
+ << number_of_frames << "; adding FIFO to compensate.";
audio_fifo_.reset(new AudioPullFifo(
output_channels_,
number_of_frames_,
base::Bind(&AUHALStream::ProvideInput, base::Unretained(this))));
}
-
- // Synchronous IO is not supported in this state.
- if (input_channels_ > 0)
- input_bus_->Zero();
- } else {
- if (input_channels_ > 0 && input_buffer_list_) {
- // Get the input data. |input_buffer_list_| is wrapped
- // to point to the data allocated in |input_bus_|.
- OSStatus result = AudioUnitRender(audio_unit_,
- flags,
- output_time_stamp,
- 1,
- number_of_frames,
- input_buffer_list_);
- if (result != noErr)
- ZeroBufferList(input_buffer_list_);
- }
}
// Make |output_bus_| wrap the output AudioBufferList.
- WrapBufferList(io_data, output_bus_.get(), number_of_frames);
+ WrapBufferList(data, output_bus_.get(), number_of_frames);
// Update the playout latency.
const double playout_latency_frames = GetPlayoutLatency(output_time_stamp);
@@ -255,8 +226,7 @@ void AUHALStream::ProvideInput(int frame_delay, AudioBus* dest) {
}
// Supply the input data and render the output data.
- source_->OnMoreIOData(
- input_bus_.get(),
+ source_->OnMoreData(
dest,
AudioBuffersState(0,
current_hardware_pending_bytes_ +
@@ -355,52 +325,6 @@ double AUHALStream::GetPlayoutLatency(
return (delay_frames + hardware_latency_frames_);
}
-void AUHALStream::CreateIOBusses() {
- if (input_channels_ > 0) {
- // Allocate storage for the AudioBufferList used for the
- // input data from the input AudioUnit.
- // We allocate enough space for with one AudioBuffer per channel.
- size_t buffer_list_size = offsetof(AudioBufferList, mBuffers[0]) +
- (sizeof(AudioBuffer) * input_channels_);
- input_buffer_list_storage_.reset(new uint8[buffer_list_size]);
-
- input_buffer_list_ =
- reinterpret_cast<AudioBufferList*>(input_buffer_list_storage_.get());
- input_buffer_list_->mNumberBuffers = input_channels_;
-
- // |input_bus_| allocates the storage for the PCM input data.
- input_bus_ = AudioBus::Create(input_channels_, number_of_frames_);
-
- // Make the AudioBufferList point to the memory in |input_bus_|.
- UInt32 buffer_size_bytes = input_bus_->frames() * sizeof(Float32);
- for (size_t i = 0; i < input_buffer_list_->mNumberBuffers; ++i) {
- input_buffer_list_->mBuffers[i].mNumberChannels = 1;
- input_buffer_list_->mBuffers[i].mDataByteSize = buffer_size_bytes;
- input_buffer_list_->mBuffers[i].mData = input_bus_->channel(i);
- }
- }
-
- // The output bus will wrap the AudioBufferList given to us in
- // the Render() callback.
- DCHECK_GT(output_channels_, 0);
- output_bus_ = AudioBus::CreateWrapper(output_channels_);
-}
-
-bool AUHALStream::EnableIO(bool enable, UInt32 scope) {
- // See Apple technote for details about the EnableIO property.
- // Note that we use bus 1 for input and bus 0 for output:
- // http://developer.apple.com/library/mac/#technotes/tn2091/_index.html
- UInt32 enable_IO = enable ? 1 : 0;
- OSStatus result = AudioUnitSetProperty(
- audio_unit_,
- kAudioOutputUnitProperty_EnableIO,
- scope,
- (scope == kAudioUnitScope_Input) ? 1 : 0,
- &enable_IO,
- sizeof(enable_IO));
- return (result == noErr);
-}
-
bool AUHALStream::SetStreamFormat(
AudioStreamBasicDescription* desc,
int channels,
@@ -431,8 +355,7 @@ bool AUHALStream::SetStreamFormat(
}
bool AUHALStream::ConfigureAUHAL() {
- if (device_ == kAudioObjectUnknown ||
- (input_channels_ == 0 && output_channels_ == 0))
+ if (device_ == kAudioObjectUnknown || output_channels_ == 0)
return false;
AudioComponentDescription desc = {
@@ -452,10 +375,19 @@ bool AUHALStream::ConfigureAUHAL() {
return false;
}
- // Enable input and output as appropriate.
- if (!EnableIO(input_channels_ > 0, kAudioUnitScope_Input))
- return false;
- if (!EnableIO(output_channels_ > 0, kAudioUnitScope_Output))
+ // Enable output as appropriate.
+ // See Apple technote for details about the EnableIO property.
+ // Note that we use bus 1 for input and bus 0 for output:
+ // http://developer.apple.com/library/mac/#technotes/tn2091/_index.html
+ UInt32 enable_IO = 1;
+ result = AudioUnitSetProperty(
+ audio_unit_,
+ kAudioOutputUnitProperty_EnableIO,
+ kAudioUnitScope_Output,
+ 0,
+ &enable_IO,
+ sizeof(enable_IO));
+ if (result != noErr)
return false;
// Set the device to be used with the AUHAL AudioUnit.
@@ -475,42 +407,49 @@ bool AUHALStream::ConfigureAUHAL() {
// (element) numbers:
// http://developer.apple.com/library/mac/#technotes/tn2091/_index.html
- if (input_channels_ > 0) {
- if (!SetStreamFormat(&input_format_,
- input_channels_,
- kAudioUnitScope_Output,
- 1))
- return false;
- }
-
- if (output_channels_ > 0) {
- if (!SetStreamFormat(&output_format_,
- output_channels_,
- kAudioUnitScope_Input,
- 0))
- return false;
+ if (!SetStreamFormat(&output_format_,
+ output_channels_,
+ kAudioUnitScope_Input,
+ 0)) {
+ return false;
}
// Set the buffer frame size.
- // WARNING: Setting this value changes the frame size for all audio units in
- // the current process. It's imperative that the input and output frame sizes
- // be the same as the frames_per_buffer() returned by
- // GetDefaultOutputStreamParameters().
- // See http://crbug.com/154352 for details.
- UInt32 buffer_size = number_of_frames_;
- result = AudioUnitSetProperty(
- audio_unit_,
- kAudioDevicePropertyBufferFrameSize,
- kAudioUnitScope_Output,
- 0,
- &buffer_size,
- sizeof(buffer_size));
+ // WARNING: Setting this value changes the frame size for all output audio
+ // units in the current process. As a result, the AURenderCallback must be
+ // able to handle arbitrary buffer sizes and FIFO appropriately.
+ UInt32 buffer_size = 0;
+ UInt32 property_size = sizeof(buffer_size);
+ result = AudioUnitGetProperty(audio_unit_,
+ kAudioDevicePropertyBufferFrameSize,
+ kAudioUnitScope_Output,
+ 0,
+ &buffer_size,
+ &property_size);
if (result != noErr) {
OSSTATUS_DLOG(ERROR, result)
- << "AudioUnitSetProperty(kAudioDevicePropertyBufferFrameSize) failed.";
+ << "AudioUnitGetProperty(kAudioDevicePropertyBufferFrameSize) failed.";
return false;
}
+ // Only set the buffer size if we're the only active stream or the buffer size
+ // is lower than the current buffer size.
+ if (manager_->output_stream_count() == 1 || number_of_frames_ < buffer_size) {
+ buffer_size = number_of_frames_;
+ result = AudioUnitSetProperty(audio_unit_,
+ kAudioDevicePropertyBufferFrameSize,
+ kAudioUnitScope_Output,
+ 0,
+ &buffer_size,
+ sizeof(buffer_size));
+ if (result != noErr) {
+ OSSTATUS_DLOG(ERROR, result) << "AudioUnitSetProperty("
+ "kAudioDevicePropertyBufferFrameSize) "
+ "failed. Size: " << number_of_frames_;
+ return false;
+ }
+ }
+
// Setup callback.
AURenderCallbackStruct callback;
callback.inputProc = InputProc;
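The AUHAL change above defers Start() with a cancelable closure so a Stop() arriving during the post-power-event delay drops the pending start instead of racing it. A standalone sketch of that defer-then-cancel shape using only standard C++; the real code uses base::CancelableClosure posted to the manager's task runner, which is not reproduced here:

// deferred_start_sketch.cc -- schedule a start after a delay, but allow a
// later Cancel() to drop it before it runs.
#include <atomic>
#include <chrono>
#include <cstdio>
#include <functional>
#include <memory>
#include <thread>

class DeferredStart {
 public:
  // Schedules |start| to run after |delay| unless Cancel() is called first.
  void Schedule(std::function<void()> start, std::chrono::milliseconds delay) {
    cancelled_ = std::make_shared<std::atomic<bool>>(false);
    std::shared_ptr<std::atomic<bool>> token = cancelled_;
    worker_ = std::thread([start, delay, token] {
      std::this_thread::sleep_for(delay);
      if (!token->load())
        start();
    });
  }

  // Cancels a pending start; a start that already ran is unaffected.
  void Cancel() {
    if (cancelled_)
      cancelled_->store(true);
  }

  ~DeferredStart() {
    if (worker_.joinable())
      worker_.join();
  }

 private:
  std::shared_ptr<std::atomic<bool>> cancelled_;
  std::thread worker_;
};

int main() {
  DeferredStart deferred;
  deferred.Schedule([] { std::printf("started\n"); },
                    std::chrono::milliseconds(50));
  // Stop() arriving before the delay elapses cancels the pending start,
  // mirroring deferred_start_cb_.Cancel() in AUHALStream::Stop().
  deferred.Cancel();
  return 0;
}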
diff --git a/chromium/media/audio/mac/audio_auhal_mac.h b/chromium/media/audio/mac/audio_auhal_mac.h
index b488b73c0d1..8903ea3df4d 100644
--- a/chromium/media/audio/mac/audio_auhal_mac.h
+++ b/chromium/media/audio/mac/audio_auhal_mac.h
@@ -20,6 +20,7 @@
#include <AudioUnit/AudioUnit.h>
#include <CoreAudio/CoreAudio.h>
+#include "base/cancelable_callback.h"
#include "base/compiler_specific.h"
#include "base/synchronization/lock.h"
#include "media/audio/audio_io.h"
@@ -32,8 +33,7 @@ class AudioPullFifo;
// Implementation of AudioOutputStream for Mac OS X using the
// AUHAL Audio Unit present in OS 10.4 and later.
-// It is useful for low-latency output with optional synchronized
-// input.
+// It is useful for low-latency output.
//
// Overview of operation:
// 1) An object of AUHALStream is created by the AudioManager
@@ -87,9 +87,6 @@ class AUHALStream : public AudioOutputStream {
// Called by either |audio_fifo_| or Render() to provide audio data.
void ProvideInput(int frame_delay, AudioBus* dest);
- // Helper method to enable input and output.
- bool EnableIO(bool enable, UInt32 scope);
-
// Sets the stream format on the AUHAL to PCM Float32 non-interleaved
// for the given number of channels on the given scope and element.
// The created stream description will be stored in |desc|.
@@ -116,7 +113,6 @@ class AUHALStream : public AudioOutputStream {
const AudioParameters params_;
// For convenience - same as in params_.
- const int input_channels_;
const int output_channels_;
// Buffer-size.
@@ -130,7 +126,6 @@ class AUHALStream : public AudioOutputStream {
base::Lock source_lock_;
// Holds the stream format details such as bitrate.
- AudioStreamBasicDescription input_format_;
AudioStreamBasicDescription output_format_;
// The audio device to use with the AUHAL.
@@ -149,14 +144,7 @@ class AUHALStream : public AudioOutputStream {
// The flag used to stop the streaming.
bool stopped_;
- // The input AudioUnit renders its data here.
- scoped_ptr<uint8[]> input_buffer_list_storage_;
- AudioBufferList* input_buffer_list_;
-
- // Holds the actual data for |input_buffer_list_|.
- scoped_ptr<AudioBus> input_bus_;
-
- // Container for retrieving data from AudioSourceCallback::OnMoreIOData().
+ // Container for retrieving data from AudioSourceCallback::OnMoreData().
scoped_ptr<AudioBus> output_bus_;
// Dynamically allocated FIFO used when CoreAudio asks for unexpected frame
@@ -166,6 +154,9 @@ class AUHALStream : public AudioOutputStream {
// Current buffer delay. Set by Render().
uint32 current_hardware_pending_bytes_;
+ // Used to defer Start() to work around http://crbug.com/160920.
+ base::CancelableClosure deferred_start_cb_;
+
DISALLOW_COPY_AND_ASSIGN(AUHALStream);
};
diff --git a/chromium/media/audio/mac/audio_auhal_mac_unittest.cc b/chromium/media/audio/mac/audio_auhal_mac_unittest.cc
index d709554dfaf..69179d56078 100644
--- a/chromium/media/audio/mac/audio_auhal_mac_unittest.cc
+++ b/chromium/media/audio/mac/audio_auhal_mac_unittest.cc
@@ -4,216 +4,103 @@
#include "base/basictypes.h"
#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/synchronization/waitable_event.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_manager.h"
-#include "media/audio/simple_sources.h"
+#include "media/audio/mock_audio_source_callback.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
-using ::testing::_;
-using ::testing::AnyNumber;
-using ::testing::DoAll;
-using ::testing::Field;
-using ::testing::InSequence;
-using ::testing::Invoke;
-using ::testing::NiceMock;
-using ::testing::NotNull;
-using ::testing::Return;
-
-static const int kBitsPerSample = 16;
+using testing::_;
+using testing::DoAll;
+using testing::Return;
// TODO(crogers): Most of these tests can be made platform agnostic.
// http://crbug.com/223242
namespace media {
-class MockAudioSourceCallback : public AudioOutputStream::AudioSourceCallback {
- public:
- MOCK_METHOD2(OnMoreData, int(AudioBus* audio_bus,
- AudioBuffersState buffers_state));
- MOCK_METHOD3(OnMoreIOData, int(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state));
- MOCK_METHOD1(OnError, void(AudioOutputStream* stream));
-};
-
-// Convenience method which creates a default AudioOutputStream object but
-// also allows the user to modify the default settings.
-class AudioOutputStreamWrapper {
- public:
- explicit AudioOutputStreamWrapper()
- : audio_man_(AudioManager::CreateForTesting()),
- format_(AudioParameters::AUDIO_PCM_LOW_LATENCY),
- bits_per_sample_(kBitsPerSample) {
- AudioParameters preferred_params =
- audio_man_->GetDefaultOutputStreamParameters();
- channel_layout_ = preferred_params.channel_layout();
- channels_ = preferred_params.channels();
- sample_rate_ = preferred_params.sample_rate();
- samples_per_packet_ = preferred_params.frames_per_buffer();
- }
-
- ~AudioOutputStreamWrapper() {}
+ACTION(ZeroBuffer) {
+ arg0->Zero();
+}
- // Creates AudioOutputStream object using default parameters.
- AudioOutputStream* Create() {
- return CreateOutputStream();
- }
+ACTION_P(SignalEvent, event) {
+ event->Signal();
+}
- // Creates AudioOutputStream object using non-default parameters where the
- // frame size is modified.
- AudioOutputStream* Create(int samples_per_packet) {
- samples_per_packet_ = samples_per_packet;
- return CreateOutputStream();
+class AUHALStreamTest : public testing::Test {
+ public:
+ AUHALStreamTest()
+ : message_loop_(base::MessageLoop::TYPE_UI),
+ manager_(AudioManager::CreateForTesting()) {
+ // Wait for the AudioManager to finish any initialization on the audio loop.
+ base::RunLoop().RunUntilIdle();
}
- // Creates AudioOutputStream object using non-default parameters where the
- // sample rate is modified.
- AudioOutputStream* CreateWithSampleRate(int sample_rate) {
- sample_rate_ = sample_rate;
- return CreateOutputStream();
+ virtual ~AUHALStreamTest() {
+ base::RunLoop().RunUntilIdle();
}
- // Creates AudioOutputStream object using non-default parameters where the
- // channel layout is modified.
- AudioOutputStream* CreateWithLayout(ChannelLayout layout) {
- channel_layout_ = layout;
- channels_ = ChannelLayoutToChannelCount(layout);
- return CreateOutputStream();
+ AudioOutputStream* Create() {
+ return manager_->MakeAudioOutputStream(
+ manager_->GetDefaultOutputStreamParameters(), "");
}
- AudioParameters::Format format() const { return format_; }
- int channels() const { return ChannelLayoutToChannelCount(channel_layout_); }
- int bits_per_sample() const { return bits_per_sample_; }
- int sample_rate() const { return sample_rate_; }
- int samples_per_packet() const { return samples_per_packet_; }
-
bool CanRunAudioTests() {
- return audio_man_->HasAudioOutputDevices();
+ return manager_->HasAudioOutputDevices();
}
- private:
- AudioOutputStream* CreateOutputStream() {
- AudioParameters params;
- params.Reset(format_, channel_layout_,
- channels_, 0,
- sample_rate_, bits_per_sample_,
- samples_per_packet_);
-
- AudioOutputStream* aos = audio_man_->MakeAudioOutputStream(params,
- std::string(), std::string());
- EXPECT_TRUE(aos);
- return aos;
- }
+ protected:
+ base::MessageLoop message_loop_;
+ scoped_ptr<AudioManager> manager_;
+ MockAudioSourceCallback source_;
- scoped_ptr<AudioManager> audio_man_;
-
- AudioParameters::Format format_;
- ChannelLayout channel_layout_;
- int channels_;
- int bits_per_sample_;
- int sample_rate_;
- int samples_per_packet_;
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AUHALStreamTest);
};
-// Test that we can get the hardware sample-rate.
-TEST(AUHALStreamTest, HardwareSampleRate) {
- AudioOutputStreamWrapper aosw;
- if (!aosw.CanRunAudioTests())
+TEST_F(AUHALStreamTest, HardwareSampleRate) {
+ if (!CanRunAudioTests())
return;
-
- int sample_rate = aosw.sample_rate();
- EXPECT_GE(sample_rate, 16000);
- EXPECT_LE(sample_rate, 192000);
-}
-
-// Test Create(), Close() calling sequence.
-TEST(AUHALStreamTest, CreateAndClose) {
- AudioOutputStreamWrapper aosw;
- if (!aosw.CanRunAudioTests())
- return;
-
- AudioOutputStream* aos = aosw.Create();
- aos->Close();
-}
-
-// Test Open(), Close() calling sequence.
-TEST(AUHALStreamTest, OpenAndClose) {
- AudioOutputStreamWrapper aosw;
- if (!aosw.CanRunAudioTests())
- return;
-
- AudioOutputStream* aos = aosw.Create();
- EXPECT_TRUE(aos->Open());
- aos->Close();
+ const AudioParameters preferred_params =
+ manager_->GetDefaultOutputStreamParameters();
+ EXPECT_GE(preferred_params.sample_rate(), 16000);
+ EXPECT_LE(preferred_params.sample_rate(), 192000);
}
-// Test Open(), Start(), Close() calling sequence.
-TEST(AUHALStreamTest, OpenStartAndClose) {
- AudioOutputStreamWrapper aosw;
- if (!aosw.CanRunAudioTests())
+TEST_F(AUHALStreamTest, CreateClose) {
+ if (!CanRunAudioTests())
return;
-
- AudioOutputStream* aos = aosw.Create();
- EXPECT_TRUE(aos->Open());
- MockAudioSourceCallback source;
- EXPECT_CALL(source, OnError(aos))
- .Times(0);
- aos->Start(&source);
- aos->Close();
-}
-
-// Test Open(), Start(), Stop(), Close() calling sequence.
-TEST(AUHALStreamTest, OpenStartStopAndClose) {
- AudioOutputStreamWrapper aosw;
- if (!aosw.CanRunAudioTests())
- return;
-
- AudioOutputStream* aos = aosw.Create();
- EXPECT_TRUE(aos->Open());
- MockAudioSourceCallback source;
- EXPECT_CALL(source, OnError(aos))
- .Times(0);
- aos->Start(&source);
- aos->Stop();
- aos->Close();
+ Create()->Close();
}
-// This test produces actual audio for 0.5 seconds on the default audio device
-// at the hardware sample-rate (usually 44.1KHz).
-// Parameters have been chosen carefully so you should not hear
-// pops or noises while the sound is playing.
-TEST(AUHALStreamTest, AUHALStreamPlay200HzTone) {
- AudioOutputStreamWrapper aosw;
- if (!aosw.CanRunAudioTests())
+TEST_F(AUHALStreamTest, CreateOpenClose) {
+ if (!CanRunAudioTests())
return;
-
- AudioOutputStream* aos = aosw.CreateWithLayout(CHANNEL_LAYOUT_MONO);
-
- EXPECT_TRUE(aos->Open());
-
- SineWaveAudioSource source(1, 200.0, aosw.sample_rate());
- aos->Start(&source);
- usleep(500000);
-
- aos->Stop();
- aos->Close();
+ AudioOutputStream* stream = Create();
+ EXPECT_TRUE(stream->Open());
+ stream->Close();
}
-// Test that Open() will fail with a sample-rate which isn't the hardware
-// sample-rate.
-TEST(AUHALStreamTest, AUHALStreamInvalidSampleRate) {
- AudioOutputStreamWrapper aosw;
- if (!aosw.CanRunAudioTests())
+TEST_F(AUHALStreamTest, CreateOpenStartStopClose) {
+ if (!CanRunAudioTests())
return;
- int non_default_sample_rate = aosw.sample_rate() == 44100 ?
- 48000 : 44100;
- AudioOutputStream* aos = aosw.CreateWithSampleRate(non_default_sample_rate);
+ AudioOutputStream* stream = Create();
+ EXPECT_TRUE(stream->Open());
- EXPECT_FALSE(aos->Open());
+ // Wait for the first data callback from the OS.
+ base::WaitableEvent event(false, false);
+ EXPECT_CALL(source_, OnMoreData(_, _))
+ .WillOnce(DoAll(ZeroBuffer(), SignalEvent(&event), Return(0)));
+ EXPECT_CALL(source_, OnError(_)).Times(0);
+ stream->Start(&source_);
+ event.Wait();
- aos->Close();
+ stream->Stop();
+ stream->Close();
}
} // namespace media
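The rewritten AUHALStreamTest above drives the output stream with a gmock source: OnMoreData() zeroes the buffer, signals a base::WaitableEvent from the render callback, and the test thread blocks on that event before stopping. A minimal sketch of the same wait-for-first-callback pattern, reusing the fixture and the ZeroBuffer/SignalEvent actions defined in this patch (the test name is illustrative only):

TEST_F(AUHALStreamTest, WaitsForFirstRenderCallback) {
  if (!CanRunAudioTests())
    return;
  AudioOutputStream* stream = Create();
  ASSERT_TRUE(stream->Open());

  // Not manual-reset, not initially signaled.
  base::WaitableEvent event(false, false);
  EXPECT_CALL(source_, OnMoreData(_, _))
      .WillOnce(DoAll(ZeroBuffer(),        // Keep the device output silent.
                      SignalEvent(&event),
                      Return(0)));         // Report zero frames filled.
  EXPECT_CALL(source_, OnError(_)).Times(0);

  stream->Start(&source_);
  event.Wait();  // Block until CoreAudio delivers the first OnMoreData().
  stream->Stop();
  stream->Close();
}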
diff --git a/chromium/media/audio/mac/audio_device_listener_mac.cc b/chromium/media/audio/mac/audio_device_listener_mac.cc
index 5c5ca355b9a..ef8bdd5b96c 100644
--- a/chromium/media/audio/mac/audio_device_listener_mac.cc
+++ b/chromium/media/audio/mac/audio_device_listener_mac.cc
@@ -11,7 +11,6 @@
#include "base/mac/mac_util.h"
#include "base/message_loop/message_loop.h"
#include "base/pending_task.h"
-#include "media/audio/mac/audio_low_latency_output_mac.h"
namespace media {
diff --git a/chromium/media/audio/mac/audio_device_listener_mac_unittest.cc b/chromium/media/audio/mac/audio_device_listener_mac_unittest.cc
index 7efb3297172..12c88b651a7 100644
--- a/chromium/media/audio/mac/audio_device_listener_mac_unittest.cc
+++ b/chromium/media/audio/mac/audio_device_listener_mac_unittest.cc
@@ -9,7 +9,7 @@
#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_loop.h"
#include "media/audio/mac/audio_device_listener_mac.h"
-#include "media/base/bind_to_loop.h"
+#include "media/base/bind_to_current_loop.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -36,12 +36,11 @@ class AudioDeviceListenerMacTest : public testing::Test {
}
void CreateDeviceListener() {
- // Force a post task using BindToLoop to ensure device listener internals
- // are working correctly.
- output_device_listener_.reset(new AudioDeviceListenerMac(BindToLoop(
- message_loop_.message_loop_proxy(), base::Bind(
- &AudioDeviceListenerMacTest::OnDeviceChange,
- base::Unretained(this)))));
+ // Force a post task using BindToCurrentLoop() to ensure device listener
+ // internals are working correctly.
+ output_device_listener_.reset(new AudioDeviceListenerMac(BindToCurrentLoop(
+ base::Bind(&AudioDeviceListenerMacTest::OnDeviceChange,
+ base::Unretained(this)))));
}
void DestroyDeviceListener() {
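The listener test above now wraps its device-change callback in BindToCurrentLoop(), so the notification is always delivered as a posted task on the loop that created the wrapper instead of running synchronously inside a CoreAudio callback. A conceptual sketch of that wrapping idea in plain C++11 (not Chromium code; BindToLoopLike and PostTaskFn are invented names for illustration):

#include <functional>

typedef std::function<void()> Closure;
typedef std::function<void(Closure)> PostTaskFn;

// Returns a closure that, when run from any thread or re-entrantly from a
// driver callback, forwards |task| through |post_task|, the posting hook
// captured at wrap time (the "current loop").
Closure BindToLoopLike(PostTaskFn post_task, Closure task) {
  return [post_task, task]() { post_task(task); };
}

// Usage sketch:
//   Closure wrapped = BindToLoopLike(post_to_test_loop, on_device_change);
//   wrapped();  // Never runs on_device_change inline; always posts it.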
diff --git a/chromium/media/audio/mac/audio_input_mac.cc b/chromium/media/audio/mac/audio_input_mac.cc
index 4aee1179cfa..b7f6e173109 100644
--- a/chromium/media/audio/mac/audio_input_mac.cc
+++ b/chromium/media/audio/mac/audio_input_mac.cc
@@ -9,18 +9,20 @@
#include "base/basictypes.h"
#include "base/logging.h"
#include "base/mac/mac_logging.h"
-#include "media/audio/audio_manager_base.h"
-
+#include "media/audio/mac/audio_manager_mac.h"
+#include "media/base/audio_bus.h"
namespace media {
PCMQueueInAudioInputStream::PCMQueueInAudioInputStream(
- AudioManagerBase* manager, const AudioParameters& params)
+ AudioManagerMac* manager,
+ const AudioParameters& params)
: manager_(manager),
callback_(NULL),
audio_queue_(NULL),
buffer_size_bytes_(0),
- started_(false) {
+ started_(false),
+ audio_bus_(media::AudioBus::Create(params)) {
// We must have a manager.
DCHECK(manager_);
// A frame is one sample across all channels. In interleaved audio the per
@@ -65,6 +67,21 @@ void PCMQueueInAudioInputStream::Start(AudioInputCallback* callback) {
DLOG_IF(ERROR, !audio_queue_) << "Open() has not been called successfully";
if (callback_ || !audio_queue_)
return;
+
+ // Check if we should defer Start() for http://crbug.com/160920.
+ if (manager_->ShouldDeferStreamStart()) {
+ // Use a cancellable closure so that if Stop() is called before Start()
+ // actually runs, we can cancel the pending start.
+ deferred_start_cb_.Reset(base::Bind(
+ &PCMQueueInAudioInputStream::Start, base::Unretained(this), callback));
+ manager_->GetTaskRunner()->PostDelayedTask(
+ FROM_HERE,
+ deferred_start_cb_.callback(),
+ base::TimeDelta::FromSeconds(
+ AudioManagerMac::kStartDelayInSecsForPowerEvents));
+ return;
+ }
+
callback_ = callback;
OSStatus err = AudioQueueStart(audio_queue_, NULL);
if (err != noErr) {
@@ -75,6 +92,7 @@ void PCMQueueInAudioInputStream::Start(AudioInputCallback* callback) {
}
void PCMQueueInAudioInputStream::Stop() {
+ deferred_start_cb_.Cancel();
if (!audio_queue_ || !started_)
return;
@@ -85,9 +103,12 @@ void PCMQueueInAudioInputStream::Stop() {
HandleError(err);
started_ = false;
+ callback_ = NULL;
}
void PCMQueueInAudioInputStream::Close() {
+ Stop();
+
// It is valid to call Close() before calling Open() or Start(), thus
// |audio_queue_| and |callback_| might be NULL.
if (audio_queue_) {
@@ -96,10 +117,7 @@ void PCMQueueInAudioInputStream::Close() {
if (err != noErr)
HandleError(err);
}
- if (callback_) {
- callback_->OnClose(this);
- callback_ = NULL;
- }
+
manager_->ReleaseInputStream(this);
// CARE: This object may now be destroyed.
}
@@ -200,11 +218,11 @@ void PCMQueueInAudioInputStream::HandleInputBuffer(
if (elapsed < kMinDelay)
base::PlatformThread::Sleep(kMinDelay - elapsed);
- callback_->OnData(this,
- reinterpret_cast<const uint8*>(audio_buffer->mAudioData),
- audio_buffer->mAudioDataByteSize,
- audio_buffer->mAudioDataByteSize,
- 0.0);
+ uint8* audio_data = reinterpret_cast<uint8*>(audio_buffer->mAudioData);
+ audio_bus_->FromInterleaved(
+ audio_data, audio_bus_->frames(), format_.mBitsPerChannel / 8);
+ callback_->OnData(
+ this, audio_bus_.get(), audio_buffer->mAudioDataByteSize, 0.0);
last_fill_ = base::TimeTicks::Now();
}
diff --git a/chromium/media/audio/mac/audio_input_mac.h b/chromium/media/audio/mac/audio_input_mac.h
index 77eb65b0315..a6e897e0610 100644
--- a/chromium/media/audio/mac/audio_input_mac.h
+++ b/chromium/media/audio/mac/audio_input_mac.h
@@ -8,6 +8,7 @@
#include <AudioToolbox/AudioFormat.h>
#include <AudioToolbox/AudioQueue.h>
+#include "base/cancelable_callback.h"
#include "base/compiler_specific.h"
#include "base/time/time.h"
#include "media/audio/audio_io.h"
@@ -15,14 +16,15 @@
namespace media {
-class AudioManagerBase;
+class AudioBus;
+class AudioManagerMac;
// Implementation of AudioInputStream for Mac OS X using the audio queue service
// present in OS 10.5 and later. Design reflects PCMQueueOutAudioOutputStream.
class PCMQueueInAudioInputStream : public AudioInputStream {
public:
// Parameters as per AudioManager::MakeAudioInputStream.
- PCMQueueInAudioInputStream(AudioManagerBase* manager,
+ PCMQueueInAudioInputStream(AudioManagerMac* manager,
const AudioParameters& params);
virtual ~PCMQueueInAudioInputStream();
@@ -66,7 +68,7 @@ class PCMQueueInAudioInputStream : public AudioInputStream {
static const int kNumberBuffers = 3;
// Manager that owns this stream, used for closing down.
- AudioManagerBase* manager_;
+ AudioManagerMac* manager_;
// We use the callback mostly to periodically supply the recorded audio data.
AudioInputCallback* callback_;
// Structure that holds the stream format details such as bitrate.
@@ -79,6 +81,10 @@ class PCMQueueInAudioInputStream : public AudioInputStream {
bool started_;
// Used to determine if we need to slow down |callback_| calls.
base::TimeTicks last_fill_;
+ // Used to defer Start() to workaround http://crbug.com/160920.
+ base::CancelableClosure deferred_start_cb_;
+
+ scoped_ptr<media::AudioBus> audio_bus_;
DISALLOW_COPY_AND_ASSIGN(PCMQueueInAudioInputStream);
};
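Both Mac input streams in this patch gain the same deferred-start workaround for http://crbug.com/160920: when the manager reports a recent suspend or resume, Start() re-posts itself through a base::CancelableClosure, and Stop() cancels any pending attempt. A sketch of the pattern using the same calls shown above (SomeMacInputStream is a placeholder class name, not part of the patch):

void SomeMacInputStream::Start(AudioInputCallback* callback) {
  if (manager_->ShouldDeferStreamStart()) {
    // Re-post Start() after the power-event delay; Stop() may cancel it.
    deferred_start_cb_.Reset(base::Bind(
        &SomeMacInputStream::Start, base::Unretained(this), callback));
    manager_->GetTaskRunner()->PostDelayedTask(
        FROM_HERE,
        deferred_start_cb_.callback(),
        base::TimeDelta::FromSeconds(
            AudioManagerMac::kStartDelayInSecsForPowerEvents));
    return;
  }
  // ... normal start path: store |callback| and start the queue/unit ...
}

void SomeMacInputStream::Stop() {
  deferred_start_cb_.Cancel();  // Drop a deferred Start() that has not run.
  // ... normal stop path ...
}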
diff --git a/chromium/media/audio/mac/audio_low_latency_input_mac.cc b/chromium/media/audio/mac/audio_low_latency_input_mac.cc
index dbc75bfea31..d7a3430f6d8 100644
--- a/chromium/media/audio/mac/audio_low_latency_input_mac.cc
+++ b/chromium/media/audio/mac/audio_low_latency_input_mac.cc
@@ -10,6 +10,7 @@
#include "base/logging.h"
#include "base/mac/mac_logging.h"
#include "media/audio/mac/audio_manager_mac.h"
+#include "media/base/audio_bus.h"
#include "media/base/data_buffer.h"
namespace media {
@@ -31,11 +32,10 @@ static std::ostream& operator<<(std::ostream& os,
// http://developer.apple.com/library/mac/#technotes/tn2091/_index.html
// for more details and background regarding this implementation.
-AUAudioInputStream::AUAudioInputStream(
- AudioManagerMac* manager,
- const AudioParameters& input_params,
- const AudioParameters& output_params,
- AudioDeviceID audio_device_id)
+AUAudioInputStream::AUAudioInputStream(AudioManagerMac* manager,
+ const AudioParameters& input_params,
+ const AudioParameters& output_params,
+ AudioDeviceID audio_device_id)
: manager_(manager),
sink_(NULL),
audio_unit_(0),
@@ -43,7 +43,8 @@ AUAudioInputStream::AUAudioInputStream(
started_(false),
hardware_latency_frames_(0),
fifo_delay_bytes_(0),
- number_of_channels_in_frame_(0) {
+ number_of_channels_in_frame_(0),
+ audio_bus_(media::AudioBus::Create(input_params)) {
DCHECK(manager_);
// Set up the desired (output) format specified by the client.
@@ -64,9 +65,6 @@ AUAudioInputStream::AUAudioInputStream(
// Set number of sample frames per callback used by the internal audio layer.
// An internal FIFO is then utilized to adapt the internal size to the size
// requested by the client.
- // Note that we use the same native buffer size as for the output side here
- // since the AUHAL implementation requires that both capture and render side
- // use the same buffer size. See http://crbug.com/154352 for more details.
number_of_frames_ = output_params.frames_per_buffer();
DVLOG(1) << "Size of data buffer in frames : " << number_of_frames_;
@@ -233,23 +231,38 @@ bool AUAudioInputStream::Open() {
}
// Set the desired number of frames in the IO buffer (output scope).
- // WARNING: Setting this value changes the frame size for all audio units in
- // the current process. It's imperative that the input and output frame sizes
- // be the same as the frames_per_buffer() returned by
- // GetInputStreamParameters().
- // TODO(henrika): Due to http://crrev.com/159666 this is currently not true
- // and should be fixed, a CHECK() should be added at that time.
- result = AudioUnitSetProperty(audio_unit_,
+ // WARNING: Setting this value changes the frame size for all input audio
+ // units in the current process. As a result, the AURenderCallback must be
+ // able to handle arbitrary buffer sizes and FIFO appropriately.
+ UInt32 buffer_size = 0;
+ UInt32 property_size = sizeof(buffer_size);
+ result = AudioUnitGetProperty(audio_unit_,
kAudioDevicePropertyBufferFrameSize,
kAudioUnitScope_Output,
1,
- &number_of_frames_, // size is set in the ctor
- sizeof(number_of_frames_));
- if (result) {
+ &buffer_size,
+ &property_size);
+ if (result != noErr) {
HandleError(result);
return false;
}
+ // Only set the buffer size if we're the only active stream or the buffer size
+ // is lower than the current buffer size.
+ if (manager_->input_stream_count() == 1 || number_of_frames_ < buffer_size) {
+ buffer_size = number_of_frames_;
+ result = AudioUnitSetProperty(audio_unit_,
+ kAudioDevicePropertyBufferFrameSize,
+ kAudioUnitScope_Output,
+ 1,
+ &buffer_size,
+ sizeof(buffer_size));
+ if (result != noErr) {
+ HandleError(result);
+ return false;
+ }
+ }
+
// Finally, initialize the audio unit and ensure that it is ready to render.
// Allocates memory according to the maximum number of audio frames
// it can produce in response to a single render call.
@@ -274,6 +287,21 @@ void AUAudioInputStream::Start(AudioInputCallback* callback) {
DLOG_IF(ERROR, !audio_unit_) << "Open() has not been called successfully";
if (started_ || !audio_unit_)
return;
+
+ // Check if we should defer Start() for http://crbug.com/160920.
+ if (manager_->ShouldDeferStreamStart()) {
+ // Use a cancellable closure so that if Stop() is called before Start()
+ // actually runs, we can cancel the pending start.
+ deferred_start_cb_.Reset(base::Bind(
+ &AUAudioInputStream::Start, base::Unretained(this), callback));
+ manager_->GetTaskRunner()->PostDelayedTask(
+ FROM_HERE,
+ deferred_start_cb_.callback(),
+ base::TimeDelta::FromSeconds(
+ AudioManagerMac::kStartDelayInSecsForPowerEvents));
+ return;
+ }
+
sink_ = callback;
StartAgc();
OSStatus result = AudioOutputUnitStart(audio_unit_);
@@ -289,9 +317,10 @@ void AUAudioInputStream::Stop() {
return;
StopAgc();
OSStatus result = AudioOutputUnitStop(audio_unit_);
- if (result == noErr) {
- started_ = false;
- }
+ DCHECK_EQ(result, noErr);
+ started_ = false;
+ sink_ = NULL;
+
OSSTATUS_DLOG_IF(ERROR, result != noErr, result)
<< "Failed to stop acquiring data";
}
@@ -310,10 +339,6 @@ void AUAudioInputStream::Close() {
CloseComponent(audio_unit_);
audio_unit_ = 0;
}
- if (sink_) {
- sink_->OnClose(this);
- sink_ = NULL;
- }
// Inform the audio manager that we have been closed. This can cause our
// destruction.
@@ -518,12 +543,13 @@ OSStatus AUAudioInputStream::Provide(UInt32 number_of_frames,
// Read from FIFO into temporary data buffer.
fifo_->Read(data_->writable_data(), requested_size_bytes_);
+ // Copy captured (and interleaved) data into deinterleaved audio bus.
+ audio_bus_->FromInterleaved(
+ data_->data(), audio_bus_->frames(), format_.mBitsPerChannel / 8);
+
// Deliver data packet, delay estimation and volume level to the user.
- sink_->OnData(this,
- data_->data(),
- requested_size_bytes_,
- capture_delay_bytes,
- normalized_volume);
+ sink_->OnData(
+ this, audio_bus_.get(), capture_delay_bytes, normalized_volume);
}
return noErr;
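The Open() change above stops overwriting kAudioDevicePropertyBufferFrameSize unconditionally; the property is shared by every input audio unit in the process, so the stream now reads the current value and writes it back only when it is the sole input stream or needs a smaller buffer. A condensed sketch of that negotiation, with error handling and the surrounding setup elided:

UInt32 buffer_size = 0;
UInt32 property_size = sizeof(buffer_size);
OSStatus result = AudioUnitGetProperty(audio_unit_,
                                       kAudioDevicePropertyBufferFrameSize,
                                       kAudioUnitScope_Output,
                                       1,  // Input element (bus 1).
                                       &buffer_size,
                                       &property_size);
if (result != noErr)
  return false;

// Shrink the shared buffer only when it is safe: this is the only input
// stream, or a smaller (lower latency) buffer than the current one is needed.
if (manager_->input_stream_count() == 1 || number_of_frames_ < buffer_size) {
  buffer_size = number_of_frames_;
  result = AudioUnitSetProperty(audio_unit_,
                                kAudioDevicePropertyBufferFrameSize,
                                kAudioUnitScope_Output,
                                1,
                                &buffer_size,
                                sizeof(buffer_size));
  if (result != noErr)
    return false;
}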
diff --git a/chromium/media/audio/mac/audio_low_latency_input_mac.h b/chromium/media/audio/mac/audio_low_latency_input_mac.h
index 04592d2cecf..7726227eae5 100644
--- a/chromium/media/audio/mac/audio_low_latency_input_mac.h
+++ b/chromium/media/audio/mac/audio_low_latency_input_mac.h
@@ -39,7 +39,7 @@
#include <AudioUnit/AudioUnit.h>
#include <CoreAudio/CoreAudio.h>
-#include "base/atomicops.h"
+#include "base/cancelable_callback.h"
#include "base/memory/scoped_ptr.h"
#include "base/synchronization/lock.h"
#include "media/audio/agc_audio_stream.h"
@@ -49,6 +49,7 @@
namespace media {
+class AudioBus;
class AudioManagerMac;
class DataBuffer;
@@ -162,6 +163,13 @@ class AUAudioInputStream : public AgcAudioStream<AudioInputStream> {
// OnData() callbacks where each callback contains this amount of bytes.
int requested_size_bytes_;
+ // Used to defer Start() to workaround http://crbug.com/160920.
+ base::CancelableClosure deferred_start_cb_;
+
+ // Extra audio bus used for storage of deinterleaved data for the OnData
+ // callback.
+ scoped_ptr<media::AudioBus> audio_bus_;
+
DISALLOW_COPY_AND_ASSIGN(AUAudioInputStream);
};
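With the new AudioInputCallback::OnData() signature, captured audio crosses the interface as a deinterleaved media::AudioBus rather than a raw byte buffer: the streams above deinterleave with FromInterleaved(), and byte-oriented consumers reinterleave with ToInterleaved(). A small sketch of that round trip, assuming 16-bit samples; |params| and |captured_bytes| stand in for the stream's AudioParameters and the device buffer:

// Producer side (inside the stream): deinterleave into the member audio bus.
scoped_ptr<media::AudioBus> bus = media::AudioBus::Create(params);
bus->FromInterleaved(captured_bytes, bus->frames(), sizeof(int16));
// callback_->OnData(this, bus.get(), hardware_delay_bytes, volume);

// Consumer side (for example a file-writing sink): reinterleave for storage.
std::vector<int16> interleaved(bus->frames() * bus->channels());
bus->ToInterleaved(bus->frames(), sizeof(int16), &interleaved[0]);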
diff --git a/chromium/media/audio/mac/audio_low_latency_input_mac_unittest.cc b/chromium/media/audio/mac/audio_low_latency_input_mac_unittest.cc
index 9360befe575..79721d4f37b 100644
--- a/chromium/media/audio/mac/audio_low_latency_input_mac_unittest.cc
+++ b/chromium/media/audio/mac/audio_low_latency_input_mac_unittest.cc
@@ -5,6 +5,7 @@
#include "base/basictypes.h"
#include "base/environment.h"
#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
#include "base/test/test_timeouts.h"
#include "base/threading/platform_thread.h"
#include "media/audio/audio_io.h"
@@ -22,18 +23,19 @@ using ::testing::NotNull;
namespace media {
-ACTION_P3(CheckCountAndPostQuitTask, count, limit, loop) {
+ACTION_P4(CheckCountAndPostQuitTask, count, limit, loop, closure) {
if (++*count >= limit) {
- loop->PostTask(FROM_HERE, base::MessageLoop::QuitClosure());
+ loop->PostTask(FROM_HERE, closure);
}
}
class MockAudioInputCallback : public AudioInputStream::AudioInputCallback {
public:
- MOCK_METHOD5(OnData, void(AudioInputStream* stream,
- const uint8* src, uint32 size,
- uint32 hardware_delay_bytes, double volume));
- MOCK_METHOD1(OnClose, void(AudioInputStream* stream));
+ MOCK_METHOD4(OnData,
+ void(AudioInputStream* stream,
+ const AudioBus* src,
+ uint32 hardware_delay_bytes,
+ double volume));
MOCK_METHOD1(OnError, void(AudioInputStream* stream));
};
@@ -74,17 +76,23 @@ class WriteToFileAudioSink : public AudioInputStream::AudioInputCallback {
// AudioInputStream::AudioInputCallback implementation.
virtual void OnData(AudioInputStream* stream,
- const uint8* src, uint32 size,
- uint32 hardware_delay_bytes, double volume) OVERRIDE {
+ const AudioBus* src,
+ uint32 hardware_delay_bytes,
+ double volume) OVERRIDE {
+ const int num_samples = src->frames() * src->channels();
+ scoped_ptr<int16> interleaved(new int16[num_samples]);
+ const int bytes_per_sample = sizeof(*interleaved);
+ src->ToInterleaved(src->frames(), bytes_per_sample, interleaved.get());
+
// Store data data in a temporary buffer to avoid making blocking
// fwrite() calls in the audio callback. The complete buffer will be
// written to file in the destructor.
- if (buffer_.Append(src, size)) {
+ const int size = bytes_per_sample * num_samples;
+ if (buffer_.Append((const uint8*)interleaved.get(), size)) {
bytes_to_write_ += size;
}
}
- virtual void OnClose(AudioInputStream* stream) OVERRIDE {}
virtual void OnError(AudioInputStream* stream) OVERRIDE {}
private:
@@ -95,8 +103,16 @@ class WriteToFileAudioSink : public AudioInputStream::AudioInputCallback {
class MacAudioInputTest : public testing::Test {
protected:
- MacAudioInputTest() : audio_manager_(AudioManager::CreateForTesting()) {}
- virtual ~MacAudioInputTest() {}
+ MacAudioInputTest()
+ : message_loop_(base::MessageLoop::TYPE_UI),
+ audio_manager_(AudioManager::CreateForTesting()) {
+ // Wait for the AudioManager to finish any initialization on the audio loop.
+ base::RunLoop().RunUntilIdle();
+ }
+
+ virtual ~MacAudioInputTest() {
+ base::RunLoop().RunUntilIdle();
+ }
// Convenience method which ensures that we are not running on the build
// bots and that at least one valid input device can be found.
@@ -134,6 +150,7 @@ class MacAudioInputTest : public testing::Test {
return ais;
}
+ base::MessageLoop message_loop_;
scoped_ptr<AudioManager> audio_manager_;
};
@@ -162,8 +179,6 @@ TEST_F(MacAudioInputTest, AUAudioInputStreamOpenStartAndClose) {
EXPECT_TRUE(ais->Open());
MockAudioInputCallback sink;
ais->Start(&sink);
- EXPECT_CALL(sink, OnClose(ais))
- .Times(1);
ais->Close();
}
@@ -176,8 +191,6 @@ TEST_F(MacAudioInputTest, AUAudioInputStreamOpenStartStopAndClose) {
MockAudioInputCallback sink;
ais->Start(&sink);
ais->Stop();
- EXPECT_CALL(sink, OnClose(ais))
- .Times(1);
ais->Close();
}
@@ -206,8 +219,6 @@ TEST_F(MacAudioInputTest, AUAudioInputStreamMiscCallingSequences) {
ais->Stop();
EXPECT_FALSE(auais->started());
- EXPECT_CALL(sink, OnClose(ais))
- .Times(1);
ais->Close();
}
@@ -217,32 +228,24 @@ TEST_F(MacAudioInputTest, AUAudioInputStreamVerifyMonoRecording) {
return;
int count = 0;
- base::MessageLoopForUI loop;
// Create an audio input stream which records in mono.
AudioInputStream* ais = CreateAudioInputStream(CHANNEL_LAYOUT_MONO);
EXPECT_TRUE(ais->Open());
- int fs = static_cast<int>(AUAudioInputStream::HardwareSampleRate());
- int samples_per_packet = fs / 100;
- int bits_per_sample = 16;
- uint32 bytes_per_packet = samples_per_packet * (bits_per_sample / 8);
-
MockAudioInputCallback sink;
// We use 10ms packets and will run the test until ten packets are received.
// All should contain valid packets of the same size and a valid delay
// estimate.
- EXPECT_CALL(sink, OnData(ais, NotNull(), bytes_per_packet, _, _))
+ base::RunLoop run_loop;
+ EXPECT_CALL(sink, OnData(ais, NotNull(), _, _))
.Times(AtLeast(10))
- .WillRepeatedly(CheckCountAndPostQuitTask(&count, 10, &loop));
+ .WillRepeatedly(CheckCountAndPostQuitTask(
+ &count, 10, &message_loop_, run_loop.QuitClosure()));
ais->Start(&sink);
- loop.Run();
+ run_loop.Run();
ais->Stop();
-
- // Verify that the sink receieves OnClose() call when calling Close().
- EXPECT_CALL(sink, OnClose(ais))
- .Times(1);
ais->Close();
}
@@ -252,17 +255,11 @@ TEST_F(MacAudioInputTest, AUAudioInputStreamVerifyStereoRecording) {
return;
int count = 0;
- base::MessageLoopForUI loop;
// Create an audio input stream which records in stereo.
AudioInputStream* ais = CreateAudioInputStream(CHANNEL_LAYOUT_STEREO);
EXPECT_TRUE(ais->Open());
- int fs = static_cast<int>(AUAudioInputStream::HardwareSampleRate());
- int samples_per_packet = fs / 100;
- int bits_per_sample = 16;
- uint32 bytes_per_packet = 2 * samples_per_packet * (bits_per_sample / 8);
-
MockAudioInputCallback sink;
// We use 10ms packets and will run the test until ten packets are received.
@@ -275,16 +272,14 @@ TEST_F(MacAudioInputTest, AUAudioInputStreamVerifyStereoRecording) {
// parameter #4 does no longer pass. I am removing this restriction here to
// ensure that we can land the patch but will revisit this test again when
// more analysis of the delay estimates are done.
- EXPECT_CALL(sink, OnData(ais, NotNull(), bytes_per_packet, _, _))
+ base::RunLoop run_loop;
+ EXPECT_CALL(sink, OnData(ais, NotNull(), _, _))
.Times(AtLeast(10))
- .WillRepeatedly(CheckCountAndPostQuitTask(&count, 10, &loop));
+ .WillRepeatedly(CheckCountAndPostQuitTask(
+ &count, 10, &message_loop_, run_loop.QuitClosure()));
ais->Start(&sink);
- loop.Run();
+ run_loop.Run();
ais->Stop();
-
- // Verify that the sink receieves OnClose() call when calling Close().
- EXPECT_CALL(sink, OnClose(ais))
- .Times(1);
ais->Close();
}
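The recording tests above now share the fixture's message loop with the AudioManager and stop through a base::RunLoop quit closure: the gmock action posts run_loop.QuitClosure() back to the loop once enough packets have arrived, and that closure only exits the specific Run() it was taken from. The resulting pattern, lifted from the diff as a sketch (|ais|, |sink| and |count| come from the test body):

int count = 0;
base::RunLoop run_loop;
EXPECT_CALL(sink, OnData(ais, NotNull(), _, _))
    .Times(AtLeast(10))
    .WillRepeatedly(CheckCountAndPostQuitTask(
        &count, 10, &message_loop_, run_loop.QuitClosure()));
ais->Start(&sink);
run_loop.Run();  // Returns after the tenth OnData() posts the quit closure.
ais->Stop();
ais->Close();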
diff --git a/chromium/media/audio/mac/audio_low_latency_output_mac.cc b/chromium/media/audio/mac/audio_low_latency_output_mac.cc
deleted file mode 100644
index afa480aefb9..00000000000
--- a/chromium/media/audio/mac/audio_low_latency_output_mac.cc
+++ /dev/null
@@ -1,416 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/mac/audio_low_latency_output_mac.h"
-
-#include <CoreServices/CoreServices.h>
-
-#include "base/basictypes.h"
-#include "base/command_line.h"
-#include "base/logging.h"
-#include "base/mac/mac_logging.h"
-#include "media/audio/mac/audio_manager_mac.h"
-#include "media/base/media_switches.h"
-
-namespace media {
-
-static std::ostream& operator<<(std::ostream& os,
- const AudioStreamBasicDescription& format) {
- os << "sample rate : " << format.mSampleRate << std::endl
- << "format ID : " << format.mFormatID << std::endl
- << "format flags : " << format.mFormatFlags << std::endl
- << "bytes per packet : " << format.mBytesPerPacket << std::endl
- << "frames per packet : " << format.mFramesPerPacket << std::endl
- << "bytes per frame : " << format.mBytesPerFrame << std::endl
- << "channels per frame: " << format.mChannelsPerFrame << std::endl
- << "bits per channel : " << format.mBitsPerChannel;
- return os;
-}
-
-static AudioObjectPropertyAddress kDefaultOutputDeviceAddress = {
- kAudioHardwarePropertyDefaultOutputDevice,
- kAudioObjectPropertyScopeGlobal,
- kAudioObjectPropertyElementMaster
-};
-
-// Overview of operation:
-// 1) An object of AUAudioOutputStream is created by the AudioManager
-// factory: audio_man->MakeAudioStream().
-// 2) Next some thread will call Open(), at that point the underlying
-// default output Audio Unit is created and configured.
-// 3) Then some thread will call Start(source).
-// Then the Audio Unit is started which creates its own thread which
-// periodically will call the source for more data as buffers are being
-// consumed.
-// 4) At some point some thread will call Stop(), which we handle by directly
-// stopping the default output Audio Unit.
-// 6) The same thread that called stop will call Close() where we cleanup
-// and notify the audio manager, which likely will destroy this object.
-
-AUAudioOutputStream::AUAudioOutputStream(
- AudioManagerMac* manager, const AudioParameters& params)
- : manager_(manager),
- source_(NULL),
- output_unit_(0),
- output_device_id_(kAudioObjectUnknown),
- volume_(1),
- hardware_latency_frames_(0),
- stopped_(false),
- audio_bus_(AudioBus::Create(params)) {
- // We must have a manager.
- DCHECK(manager_);
-
- // A frame is one sample across all channels. In interleaved audio the per
- // frame fields identify the set of n |channels|. In uncompressed audio, a
- // packet is always one frame.
- format_.mSampleRate = params.sample_rate();
- format_.mFormatID = kAudioFormatLinearPCM;
- format_.mFormatFlags = kLinearPCMFormatFlagIsPacked |
- kLinearPCMFormatFlagIsSignedInteger;
- format_.mBitsPerChannel = params.bits_per_sample();
- format_.mChannelsPerFrame = params.channels();
- format_.mFramesPerPacket = 1;
- format_.mBytesPerPacket = (format_.mBitsPerChannel * params.channels()) / 8;
- format_.mBytesPerFrame = format_.mBytesPerPacket;
- format_.mReserved = 0;
-
- DVLOG(1) << "Desired ouput format: " << format_;
-
- // Calculate the number of sample frames per callback.
- number_of_frames_ = params.frames_per_buffer();
- DVLOG(1) << "Number of frames per callback: " << number_of_frames_;
-}
-
-AUAudioOutputStream::~AUAudioOutputStream() {
-}
-
-bool AUAudioOutputStream::Open() {
- // Obtain the current input device selected by the user.
- UInt32 size = sizeof(output_device_id_);
- OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject,
- &kDefaultOutputDeviceAddress,
- 0,
- 0,
- &size,
- &output_device_id_);
- if (result != noErr || output_device_id_ == kAudioObjectUnknown) {
- OSSTATUS_DLOG(ERROR, result)
- << "Could not get default audio output device.";
- return false;
- }
-
- // Open and initialize the DefaultOutputUnit.
- AudioComponent comp;
- AudioComponentDescription desc;
-
- desc.componentType = kAudioUnitType_Output;
- desc.componentSubType = kAudioUnitSubType_DefaultOutput;
- desc.componentManufacturer = kAudioUnitManufacturer_Apple;
- desc.componentFlags = 0;
- desc.componentFlagsMask = 0;
- comp = AudioComponentFindNext(0, &desc);
- if (!comp)
- return false;
-
- result = AudioComponentInstanceNew(comp, &output_unit_);
- if (result != noErr) {
- OSSTATUS_DLOG(ERROR, result) << "AudioComponentInstanceNew() failed.";
- return false;
- }
-
- result = AudioUnitInitialize(output_unit_);
- if (result != noErr) {
- OSSTATUS_DLOG(ERROR, result) << "AudioUnitInitialize() failed.";
- return false;
- }
-
- hardware_latency_frames_ = GetHardwareLatency();
-
- return Configure();
-}
-
-bool AUAudioOutputStream::Configure() {
- // Set the render callback.
- AURenderCallbackStruct input;
- input.inputProc = InputProc;
- input.inputProcRefCon = this;
- OSStatus result = AudioUnitSetProperty(
- output_unit_,
- kAudioUnitProperty_SetRenderCallback,
- kAudioUnitScope_Global,
- 0,
- &input,
- sizeof(input));
- if (result != noErr) {
- OSSTATUS_DLOG(ERROR, result)
- << "AudioUnitSetProperty(kAudioUnitProperty_SetRenderCallback) failed.";
- return false;
- }
-
- // Set the stream format.
- result = AudioUnitSetProperty(
- output_unit_,
- kAudioUnitProperty_StreamFormat,
- kAudioUnitScope_Input,
- 0,
- &format_,
- sizeof(format_));
- if (result != noErr) {
- OSSTATUS_DLOG(ERROR, result)
- << "AudioUnitSetProperty(kAudioUnitProperty_StreamFormat) failed.";
- return false;
- }
-
- // Set the buffer frame size.
- // WARNING: Setting this value changes the frame size for all audio units in
- // the current process. It's imperative that the input and output frame sizes
- // be the same as the frames_per_buffer() returned by
- // GetDefaultOutputStreamParameters.
- // See http://crbug.com/154352 for details.
- const AudioParameters hw_params =
- manager_->GetDefaultOutputStreamParameters();
- if (number_of_frames_ != static_cast<size_t>(hw_params.frames_per_buffer())) {
- DLOG(ERROR) << "Audio buffer size does not match hardware buffer size.";
- return false;
- }
-
- UInt32 buffer_size = number_of_frames_;
- result = AudioUnitSetProperty(
- output_unit_,
- kAudioDevicePropertyBufferFrameSize,
- kAudioUnitScope_Output,
- 0,
- &buffer_size,
- sizeof(buffer_size));
- if (result != noErr) {
- OSSTATUS_DLOG(ERROR, result)
- << "AudioUnitSetProperty(kAudioDevicePropertyBufferFrameSize) failed.";
- return false;
- }
-
- return true;
-}
-
-void AUAudioOutputStream::Close() {
- if (output_unit_)
- AudioComponentInstanceDispose(output_unit_);
-
- // Inform the audio manager that we have been closed. This can cause our
- // destruction.
- manager_->ReleaseOutputStream(this);
-}
-
-void AUAudioOutputStream::Start(AudioSourceCallback* callback) {
- DCHECK(callback);
- if (!output_unit_) {
- DLOG(ERROR) << "Open() has not been called successfully";
- return;
- }
-
- stopped_ = false;
- {
- base::AutoLock auto_lock(source_lock_);
- source_ = callback;
- }
-
- AudioOutputUnitStart(output_unit_);
-}
-
-void AUAudioOutputStream::Stop() {
- if (stopped_)
- return;
-
- AudioOutputUnitStop(output_unit_);
-
- base::AutoLock auto_lock(source_lock_);
- source_ = NULL;
- stopped_ = true;
-}
-
-void AUAudioOutputStream::SetVolume(double volume) {
- if (!output_unit_)
- return;
- volume_ = static_cast<float>(volume);
-
- // TODO(crogers): set volume property
-}
-
-void AUAudioOutputStream::GetVolume(double* volume) {
- if (!output_unit_)
- return;
- *volume = volume_;
-}
-
-// Pulls on our provider to get rendered audio stream.
-// Note to future hackers of this function: Do not add locks here because this
-// is running on a real-time thread (for low-latency).
-OSStatus AUAudioOutputStream::Render(UInt32 number_of_frames,
- AudioBufferList* io_data,
- const AudioTimeStamp* output_time_stamp) {
- // Update the playout latency.
- double playout_latency_frames = GetPlayoutLatency(output_time_stamp);
-
- AudioBuffer& buffer = io_data->mBuffers[0];
- uint8* audio_data = reinterpret_cast<uint8*>(buffer.mData);
- uint32 hardware_pending_bytes = static_cast<uint32>
- ((playout_latency_frames + 0.5) * format_.mBytesPerFrame);
-
- // Unfortunately AUAudioInputStream and AUAudioOutputStream share the frame
- // size set by kAudioDevicePropertyBufferFrameSize above on a per process
- // basis. What this means is that the |number_of_frames| value may be larger
- // or smaller than the value set during Configure(). In this case either
- // audio input or audio output will be broken, so just output silence.
- // TODO(crogers): Figure out what can trigger a change in |number_of_frames|.
- // See http://crbug.com/154352 for details.
- if (number_of_frames != static_cast<UInt32>(audio_bus_->frames())) {
- memset(audio_data, 0, number_of_frames * format_.mBytesPerFrame);
- return noErr;
- }
-
- int frames_filled = 0;
- {
- // Render() shouldn't be called except between AudioOutputUnitStart() and
- // AudioOutputUnitStop() calls, but crash reports have shown otherwise:
- // http://crbug.com/178765. We use |source_lock_| to prevent races and
- // crashes in Render() when |source_| is cleared.
- base::AutoLock auto_lock(source_lock_);
- if (!source_) {
- memset(audio_data, 0, number_of_frames * format_.mBytesPerFrame);
- return noErr;
- }
-
- frames_filled = source_->OnMoreData(
- audio_bus_.get(), AudioBuffersState(0, hardware_pending_bytes));
- }
-
- // Note: If this ever changes to output raw float the data must be clipped and
- // sanitized since it may come from an untrusted source such as NaCl.
- audio_bus_->Scale(volume_);
- audio_bus_->ToInterleaved(
- frames_filled, format_.mBitsPerChannel / 8, audio_data);
-
- return noErr;
-}
-
-// DefaultOutputUnit callback
-OSStatus AUAudioOutputStream::InputProc(void* user_data,
- AudioUnitRenderActionFlags*,
- const AudioTimeStamp* output_time_stamp,
- UInt32,
- UInt32 number_of_frames,
- AudioBufferList* io_data) {
- AUAudioOutputStream* audio_output =
- static_cast<AUAudioOutputStream*>(user_data);
- if (!audio_output)
- return -1;
-
- return audio_output->Render(number_of_frames, io_data, output_time_stamp);
-}
-
-int AUAudioOutputStream::HardwareSampleRate() {
- // Determine the default output device's sample-rate.
- AudioDeviceID device_id = kAudioObjectUnknown;
- UInt32 info_size = sizeof(device_id);
- OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject,
- &kDefaultOutputDeviceAddress,
- 0,
- 0,
- &info_size,
- &device_id);
- if (result != noErr || device_id == kAudioObjectUnknown) {
- OSSTATUS_DLOG(WARNING, result)
- << "Could not get default audio output device.";
- return 0;
- }
-
- Float64 nominal_sample_rate;
- info_size = sizeof(nominal_sample_rate);
-
- AudioObjectPropertyAddress nominal_sample_rate_address = {
- kAudioDevicePropertyNominalSampleRate,
- kAudioObjectPropertyScopeGlobal,
- kAudioObjectPropertyElementMaster
- };
- result = AudioObjectGetPropertyData(device_id,
- &nominal_sample_rate_address,
- 0,
- 0,
- &info_size,
- &nominal_sample_rate);
- if (result != noErr) {
- OSSTATUS_DLOG(WARNING, result)
- << "Could not get default sample rate for device: " << device_id;
- return 0;
- }
-
- return static_cast<int>(nominal_sample_rate);
-}
-
-double AUAudioOutputStream::GetHardwareLatency() {
- if (!output_unit_ || output_device_id_ == kAudioObjectUnknown) {
- DLOG(WARNING) << "Audio unit object is NULL or device ID is unknown";
- return 0.0;
- }
-
- // Get audio unit latency.
- Float64 audio_unit_latency_sec = 0.0;
- UInt32 size = sizeof(audio_unit_latency_sec);
- OSStatus result = AudioUnitGetProperty(output_unit_,
- kAudioUnitProperty_Latency,
- kAudioUnitScope_Global,
- 0,
- &audio_unit_latency_sec,
- &size);
- if (result != noErr) {
- OSSTATUS_DLOG(WARNING, result) << "Could not get audio unit latency";
- return 0.0;
- }
-
- // Get output audio device latency.
- AudioObjectPropertyAddress property_address = {
- kAudioDevicePropertyLatency,
- kAudioDevicePropertyScopeOutput,
- kAudioObjectPropertyElementMaster
- };
- UInt32 device_latency_frames = 0;
- size = sizeof(device_latency_frames);
- result = AudioObjectGetPropertyData(output_device_id_,
- &property_address,
- 0,
- NULL,
- &size,
- &device_latency_frames);
- if (result != noErr) {
- OSSTATUS_DLOG(WARNING, result) << "Could not get audio unit latency";
- return 0.0;
- }
-
- return static_cast<double>((audio_unit_latency_sec *
- format_.mSampleRate) + device_latency_frames);
-}
-
-double AUAudioOutputStream::GetPlayoutLatency(
- const AudioTimeStamp* output_time_stamp) {
- // Ensure mHostTime is valid.
- if ((output_time_stamp->mFlags & kAudioTimeStampHostTimeValid) == 0)
- return 0;
-
- // Get the delay between the moment getting the callback and the scheduled
- // time stamp that tells when the data is going to be played out.
- UInt64 output_time_ns = AudioConvertHostTimeToNanos(
- output_time_stamp->mHostTime);
- UInt64 now_ns = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());
-
- // Prevent overflow leading to huge delay information; occurs regularly on
- // the bots, probably less so in the wild.
- if (now_ns > output_time_ns)
- return 0;
-
- double delay_frames = static_cast<double>
- (1e-9 * (output_time_ns - now_ns) * format_.mSampleRate);
-
- return (delay_frames + hardware_latency_frames_);
-}
-
-} // namespace media
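For reference, the deleted AUAudioOutputStream reported its delay to OnMoreData() as hardware latency (the audio-unit latency converted to frames plus the device latency in frames) plus the scheduling delay between the current host time and the render timestamp. A hedged recap of that arithmetic as a standalone helper; this is not the AUHALStream replacement, and plain integer types are used to keep it self-contained:

#include <stdint.h>

// Total playout latency in frames, following the deleted GetHardwareLatency()
// and GetPlayoutLatency().
double TotalPlayoutLatencyFrames(double audio_unit_latency_sec,
                                 uint32_t device_latency_frames,
                                 uint64_t output_time_ns,
                                 uint64_t now_ns,
                                 double sample_rate) {
  const double hardware_latency_frames =
      audio_unit_latency_sec * sample_rate + device_latency_frames;
  // Guard against the clock already being past the scheduled output time,
  // which the original code saw regularly on the bots.
  const double delay_frames =
      now_ns > output_time_ns
          ? 0.0
          : 1e-9 * static_cast<double>(output_time_ns - now_ns) * sample_rate;
  return hardware_latency_frames + delay_frames;
}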
diff --git a/chromium/media/audio/mac/audio_low_latency_output_mac.h b/chromium/media/audio/mac/audio_low_latency_output_mac.h
deleted file mode 100644
index 27f3b3a837a..00000000000
--- a/chromium/media/audio/mac/audio_low_latency_output_mac.h
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Implementation notes:
-//
-// - It is recommended to first acquire the native sample rate of the default
-// output device and then use the same rate when creating this object.
-// Use AUAudioOutputStream::HardwareSampleRate() to retrieve the sample rate.
-// - Calling Close() also leads to self destruction.
-// - The latency consists of two parts:
-// 1) Hardware latency, which includes Audio Unit latency, audio device
-// latency;
-// 2) The delay between the moment getting the callback and the scheduled time
-// stamp that tells when the data is going to be played out.
-//
-#ifndef MEDIA_AUDIO_MAC_AUDIO_LOW_LATENCY_OUTPUT_MAC_H_
-#define MEDIA_AUDIO_MAC_AUDIO_LOW_LATENCY_OUTPUT_MAC_H_
-
-#include <AudioUnit/AudioUnit.h>
-#include <CoreAudio/CoreAudio.h>
-
-#include "base/compiler_specific.h"
-#include "base/synchronization/lock.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_parameters.h"
-
-namespace media {
-
-class AudioManagerMac;
-
-// Implementation of AudioOuputStream for Mac OS X using the
-// default output Audio Unit present in OS 10.4 and later.
-// The default output Audio Unit is for low-latency audio I/O.
-class AUAudioOutputStream : public AudioOutputStream {
- public:
- // The ctor takes all the usual parameters, plus |manager| which is the
- // the audio manager who is creating this object.
- AUAudioOutputStream(AudioManagerMac* manager,
- const AudioParameters& params);
- // The dtor is typically called by the AudioManager only and it is usually
- // triggered by calling AudioOutputStream::Close().
- virtual ~AUAudioOutputStream();
-
- // Implementation of AudioOutputStream.
- virtual bool Open() OVERRIDE;
- virtual void Close() OVERRIDE;
- virtual void Start(AudioSourceCallback* callback) OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void SetVolume(double volume) OVERRIDE;
- virtual void GetVolume(double* volume) OVERRIDE;
-
- static int HardwareSampleRate();
-
- private:
- // DefaultOutputUnit callback.
- static OSStatus InputProc(void* user_data,
- AudioUnitRenderActionFlags* flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data);
-
- OSStatus Render(UInt32 number_of_frames, AudioBufferList* io_data,
- const AudioTimeStamp* output_time_stamp);
-
- // Sets up the stream format for the default output Audio Unit.
- bool Configure();
-
- // Gets the fixed playout device hardware latency and stores it. Returns 0
- // if not available.
- double GetHardwareLatency();
-
- // Gets the current playout latency value.
- double GetPlayoutLatency(const AudioTimeStamp* output_time_stamp);
-
- // Our creator, the audio manager needs to be notified when we close.
- AudioManagerMac* manager_;
-
- size_t number_of_frames_;
-
- // Pointer to the object that will provide the audio samples.
- AudioSourceCallback* source_;
-
- // Protects |source_|. Necessary since Render() calls seem to be in flight
- // when |output_unit_| is supposedly stopped. See http://crbug.com/178765.
- base::Lock source_lock_;
-
- // Structure that holds the stream format details such as bitrate.
- AudioStreamBasicDescription format_;
-
- // The default output Audio Unit which talks to the audio hardware.
- AudioUnit output_unit_;
-
- // The UID refers to the current output audio device.
- AudioDeviceID output_device_id_;
-
- // Volume level from 0 to 1.
- float volume_;
-
- // Fixed playout hardware latency in frames.
- double hardware_latency_frames_;
-
- // The flag used to stop the streaming.
- bool stopped_;
-
- // Container for retrieving data from AudioSourceCallback::OnMoreData().
- scoped_ptr<AudioBus> audio_bus_;
-
- DISALLOW_COPY_AND_ASSIGN(AUAudioOutputStream);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_MAC_AUDIO_LOW_LATENCY_OUTPUT_MAC_H_
diff --git a/chromium/media/audio/mac/audio_manager_mac.cc b/chromium/media/audio/mac/audio_manager_mac.cc
index be7dddd5bb6..970720679ab 100644
--- a/chromium/media/audio/mac/audio_manager_mac.cc
+++ b/chromium/media/audio/mac/audio_manager_mac.cc
@@ -11,15 +11,15 @@
#include "base/command_line.h"
#include "base/mac/mac_logging.h"
#include "base/mac/scoped_cftyperef.h"
+#include "base/power_monitor/power_monitor.h"
+#include "base/power_monitor/power_observer.h"
#include "base/strings/sys_string_conversions.h"
+#include "base/threading/thread_checker.h"
#include "media/audio/audio_parameters.h"
#include "media/audio/mac/audio_auhal_mac.h"
#include "media/audio/mac/audio_input_mac.h"
#include "media/audio/mac/audio_low_latency_input_mac.h"
-#include "media/audio/mac/audio_low_latency_output_mac.h"
-#include "media/audio/mac/audio_synchronized_mac.h"
-#include "media/audio/mac/audio_unified_mac.h"
-#include "media/base/bind_to_loop.h"
+#include "media/base/bind_to_current_loop.h"
#include "media/base/channel_layout.h"
#include "media/base/limits.h"
#include "media/base/media_switches.h"
@@ -29,8 +29,9 @@ namespace media {
// Maximum number of output streams that can be open simultaneously.
static const int kMaxOutputStreams = 50;
-// Default buffer size in samples for low-latency input and output streams.
-static const int kDefaultLowLatencyBufferSize = 128;
+// Define bounds for low-latency input and output streams.
+static const int kMinimumInputOutputBufferSize = 128;
+static const int kMaximumInputOutputBufferSize = 4096;
// Default sample-rate on most Apple hardware.
static const int kFallbackSampleRate = 44100;
@@ -53,16 +54,6 @@ static bool HasAudioHardware(AudioObjectPropertySelector selector) {
output_device_id != kAudioObjectUnknown;
}
-// Returns true if the default input device is the same as
-// the default output device.
-bool AudioManagerMac::HasUnifiedDefaultIO() {
- AudioDeviceID input_id, output_id;
- if (!GetDefaultInputDevice(&input_id) || !GetDefaultOutputDevice(&output_id))
- return false;
-
- return input_id == output_id;
-}
-
// Retrieves information on audio devices, and prepends the default
// device to the list if the list is non-empty.
static void GetAudioDeviceInfo(bool is_input,
@@ -86,8 +77,8 @@ static void GetAudioDeviceInfo(bool is_input,
// Get the array of device ids for all the devices, which includes both
// input devices and output devices.
- scoped_ptr_malloc<AudioDeviceID>
- devices(reinterpret_cast<AudioDeviceID*>(malloc(size)));
+ scoped_ptr<AudioDeviceID, base::FreeDeleter>
+ devices(static_cast<AudioDeviceID*>(malloc(size)));
AudioDeviceID* device_ids = devices.get();
result = AudioObjectGetPropertyData(kAudioObjectSystemObject,
&property_address,
@@ -220,28 +211,87 @@ static AudioDeviceID GetAudioDeviceIdByUId(bool is_input,
return audio_device_id;
}
+template <class T>
+void StopStreams(std::list<T*>* streams) {
+ for (typename std::list<T*>::iterator it = streams->begin();
+ it != streams->end();
+ ++it) {
+ // Stop() is safe to call multiple times, so it doesn't matter if a stream
+ // has already been stopped.
+ (*it)->Stop();
+ }
+ streams->clear();
+}
+
+class AudioManagerMac::AudioPowerObserver : public base::PowerObserver {
+ public:
+ AudioPowerObserver()
+ : is_suspending_(false),
+ is_monitoring_(base::PowerMonitor::Get()) {
+ // The PowerMonitor requires significant setup (a CFRunLoop and preallocated
+ // IO ports) so it's not available under unit tests. See the OSX impl of
+ // base::PowerMonitorDeviceSource for more details.
+ if (!is_monitoring_)
+ return;
+ base::PowerMonitor::Get()->AddObserver(this);
+ }
+
+ virtual ~AudioPowerObserver() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (!is_monitoring_)
+ return;
+ base::PowerMonitor::Get()->RemoveObserver(this);
+ }
+
+ bool ShouldDeferStreamStart() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ // Start() should be deferred if the system is in the middle of a suspend or
+ // has recently started the process of resuming.
+ return is_suspending_ || base::TimeTicks::Now() < earliest_start_time_;
+ }
+
+ private:
+ virtual void OnSuspend() OVERRIDE {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ is_suspending_ = true;
+ }
+
+ virtual void OnResume() OVERRIDE {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ is_suspending_ = false;
+ earliest_start_time_ = base::TimeTicks::Now() +
+ base::TimeDelta::FromSeconds(kStartDelayInSecsForPowerEvents);
+ }
+
+ bool is_suspending_;
+ const bool is_monitoring_;
+ base::TimeTicks earliest_start_time_;
+ base::ThreadChecker thread_checker_;
+
+ DISALLOW_COPY_AND_ASSIGN(AudioPowerObserver);
+};
+
AudioManagerMac::AudioManagerMac(AudioLogFactory* audio_log_factory)
: AudioManagerBase(audio_log_factory),
- current_sample_rate_(0) {
- current_output_device_ = kAudioDeviceUnknown;
-
+ current_sample_rate_(0),
+ current_output_device_(kAudioDeviceUnknown) {
SetMaxOutputStreamsAllowed(kMaxOutputStreams);
// Task must be posted last to avoid races from handing out "this" to the
// audio thread. Always PostTask even if we're on the right thread since
// AudioManager creation is on the startup path and this may be slow.
- GetMessageLoop()->PostTask(FROM_HERE, base::Bind(
- &AudioManagerMac::CreateDeviceListener, base::Unretained(this)));
+ GetTaskRunner()->PostTask(FROM_HERE, base::Bind(
+ &AudioManagerMac::InitializeOnAudioThread, base::Unretained(this)));
}
AudioManagerMac::~AudioManagerMac() {
- if (GetMessageLoop()->BelongsToCurrentThread()) {
- DestroyDeviceListener();
+ if (GetTaskRunner()->BelongsToCurrentThread()) {
+ ShutdownOnAudioThread();
} else {
// It's safe to post a task here since Shutdown() will wait for all tasks to
// complete before returning.
- GetMessageLoop()->PostTask(FROM_HERE, base::Bind(
- &AudioManagerMac::DestroyDeviceListener, base::Unretained(this)));
+ GetTaskRunner()->PostTask(FROM_HERE, base::Bind(
+ &AudioManagerMac::ShutdownOnAudioThread, base::Unretained(this)));
}
Shutdown();
@@ -257,18 +307,15 @@ bool AudioManagerMac::HasAudioInputDevices() {
// TODO(xians): There are several places on the OSX specific code which
// could benefit from these helper functions.
-bool AudioManagerMac::GetDefaultInputDevice(
- AudioDeviceID* device) {
+bool AudioManagerMac::GetDefaultInputDevice(AudioDeviceID* device) {
return GetDefaultDevice(device, true);
}
-bool AudioManagerMac::GetDefaultOutputDevice(
- AudioDeviceID* device) {
+bool AudioManagerMac::GetDefaultOutputDevice(AudioDeviceID* device) {
return GetDefaultDevice(device, false);
}
-bool AudioManagerMac::GetDefaultDevice(
- AudioDeviceID* device, bool input) {
+bool AudioManagerMac::GetDefaultDevice(AudioDeviceID* device, bool input) {
CHECK(device);
// Obtain the current output device selected by the user.
@@ -279,14 +326,12 @@ bool AudioManagerMac::GetDefaultDevice(
pa.mElement = kAudioObjectPropertyElementMaster;
UInt32 size = sizeof(*device);
-
- OSStatus result = AudioObjectGetPropertyData(
- kAudioObjectSystemObject,
- &pa,
- 0,
- 0,
- &size,
- device);
+ OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject,
+ &pa,
+ 0,
+ 0,
+ &size,
+ device);
if ((result != kAudioHardwareNoError) || (*device == kAudioDeviceUnknown)) {
DLOG(ERROR) << "Error getting default AudioDevice.";
@@ -296,21 +341,16 @@ bool AudioManagerMac::GetDefaultDevice(
return true;
}
-bool AudioManagerMac::GetDefaultOutputChannels(
- int* channels) {
+bool AudioManagerMac::GetDefaultOutputChannels(int* channels) {
AudioDeviceID device;
if (!GetDefaultOutputDevice(&device))
return false;
-
- return GetDeviceChannels(device,
- kAudioDevicePropertyScopeOutput,
- channels);
+ return GetDeviceChannels(device, kAudioDevicePropertyScopeOutput, channels);
}
-bool AudioManagerMac::GetDeviceChannels(
- AudioDeviceID device,
- AudioObjectPropertyScope scope,
- int* channels) {
+bool AudioManagerMac::GetDeviceChannels(AudioDeviceID device,
+ AudioObjectPropertyScope scope,
+ int* channels) {
CHECK(channels);
// Get stream configuration.
@@ -329,13 +369,7 @@ bool AudioManagerMac::GetDeviceChannels(
AudioBufferList& buffer_list =
*reinterpret_cast<AudioBufferList*>(list_storage.get());
- result = AudioObjectGetPropertyData(
- device,
- &pa,
- 0,
- 0,
- &size,
- &buffer_list);
+ result = AudioObjectGetPropertyData(device, &pa, 0, 0, &size, &buffer_list);
if (result != noErr)
return false;
@@ -362,13 +396,12 @@ int AudioManagerMac::HardwareSampleRateForDevice(AudioDeviceID device_id) {
kAudioObjectPropertyScopeGlobal,
kAudioObjectPropertyElementMaster
};
- OSStatus result = AudioObjectGetPropertyData(
- device_id,
- &kNominalSampleRateAddress,
- 0,
- 0,
- &info_size,
- &nominal_sample_rate);
+ OSStatus result = AudioObjectGetPropertyData(device_id,
+ &kNominalSampleRateAddress,
+ 0,
+ 0,
+ &info_size,
+ &nominal_sample_rate);
if (result != noErr) {
OSSTATUS_DLOG(WARNING, result)
<< "Could not get default sample rate for device: " << device_id;
@@ -401,16 +434,12 @@ void AudioManagerMac::GetAudioOutputDeviceNames(
AudioParameters AudioManagerMac::GetInputStreamParameters(
const std::string& device_id) {
- // Due to the sharing of the input and output buffer sizes, we need to choose
- // the input buffer size based on the output sample rate. See
- // http://crbug.com/154352.
- const int buffer_size = ChooseBufferSize(
- AUAudioOutputStream::HardwareSampleRate());
-
AudioDeviceID device = GetAudioDeviceIdByUId(true, device_id);
if (device == kAudioObjectUnknown) {
DLOG(ERROR) << "Invalid device " << device_id;
- return AudioParameters();
+ return AudioParameters(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
+ kFallbackSampleRate, 16, ChooseBufferSize(kFallbackSampleRate));
}
int channels = 0;
@@ -427,6 +456,11 @@ AudioParameters AudioManagerMac::GetInputStreamParameters(
if (!sample_rate)
sample_rate = kFallbackSampleRate;
+ // Due to the sharing of the input and output buffer sizes, we need to choose
+ // the input buffer size based on the output sample rate. See
+ // http://crbug.com/154352.
+ const int buffer_size = ChooseBufferSize(sample_rate);
+
// TODO(xians): query the native channel layout for the specific device.
return AudioParameters(
AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout,
@@ -450,8 +484,8 @@ std::string AudioManagerMac::GetAssociatedOutputDeviceID(
return std::string();
int device_count = size / sizeof(AudioDeviceID);
- scoped_ptr_malloc<AudioDeviceID>
- devices(reinterpret_cast<AudioDeviceID*>(malloc(size)));
+ scoped_ptr<AudioDeviceID, base::FreeDeleter>
+ devices(static_cast<AudioDeviceID*>(malloc(size)));
result = AudioObjectGetPropertyData(
device, &pa, 0, NULL, &size, devices.get());
if (result)
@@ -514,72 +548,38 @@ std::string AudioManagerMac::GetAssociatedOutputDeviceID(
AudioOutputStream* AudioManagerMac::MakeLinearOutputStream(
const AudioParameters& params) {
- return MakeLowLatencyOutputStream(params, std::string(), std::string());
+ return MakeLowLatencyOutputStream(params, std::string());
}
AudioOutputStream* AudioManagerMac::MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) {
- // Handle basic output with no input channels.
- if (params.input_channels() == 0) {
- AudioDeviceID device = GetAudioDeviceIdByUId(false, device_id);
- if (device == kAudioObjectUnknown) {
- DLOG(ERROR) << "Failed to open output device: " << device_id;
- return NULL;
- }
- return new AUHALStream(this, params, device);
- }
-
- DLOG_IF(ERROR, !device_id.empty()) << "Not implemented!";
-
- // TODO(xians): support more than stereo input.
- if (params.input_channels() != 2) {
- // WebAudio is currently hard-coded to 2 channels so we should not
- // see this case.
- NOTREACHED() << "Only stereo input is currently supported!";
+ const std::string& device_id) {
+ AudioDeviceID device = GetAudioDeviceIdByUId(false, device_id);
+ if (device == kAudioObjectUnknown) {
+ DLOG(ERROR) << "Failed to open output device: " << device_id;
return NULL;
}
- AudioDeviceID device = kAudioObjectUnknown;
- if (HasUnifiedDefaultIO()) {
- // For I/O, the simplest case is when the default input and output
- // devices are the same.
- GetDefaultOutputDevice(&device);
- VLOG(0) << "UNIFIED: default input and output devices are identical";
- } else {
- // Some audio hardware is presented as separate input and output devices
- // even though they are really the same physical hardware and
- // share the same "clock domain" at the lowest levels of the driver.
- // A common of example of this is the "built-in" audio hardware:
- // "Built-in Line Input"
- // "Built-in Output"
- // We would like to use an "aggregate" device for these situations, since
- // CoreAudio will make the most efficient use of the shared "clock domain"
- // so we get the lowest latency and use fewer threads.
- device = aggregate_device_manager_.GetDefaultAggregateDevice();
- if (device != kAudioObjectUnknown)
- VLOG(0) << "Using AGGREGATE audio device";
+ // Lazily create the audio device listener on the first stream creation.
+ if (!output_device_listener_) {
+ // NOTE: Use BindToCurrentLoop() to ensure the callback is always PostTask'd
+ // even if OSX calls us on the right thread. Some CoreAudio drivers will
+ // fire the callbacks during stream creation, leading to re-entrancy issues
+ // otherwise. See http://crbug.com/349604
+ output_device_listener_.reset(
+ new AudioDeviceListenerMac(BindToCurrentLoop(base::Bind(
+ &AudioManagerMac::HandleDeviceChanges, base::Unretained(this)))));
+ // Only set the current output device for the default device.
+ if (device_id == AudioManagerBase::kDefaultDeviceId || device_id.empty())
+ current_output_device_ = device;
+ // Just use the current sample rate since we don't allow non-native sample
+ // rates on OSX.
+ current_sample_rate_ = params.sample_rate();
}
- if (device != kAudioObjectUnknown &&
- input_device_id == AudioManagerBase::kDefaultDeviceId)
- return new AUHALStream(this, params, device);
-
- // Fallback to AudioSynchronizedStream which will handle completely
- // different and arbitrary combinations of input and output devices
- // even running at different sample-rates.
- // kAudioDeviceUnknown translates to "use default" here.
- // TODO(xians): consider tracking UMA stats on AUHALStream
- // versus AudioSynchronizedStream.
- AudioDeviceID audio_device_id = GetAudioDeviceIdByUId(true, input_device_id);
- if (audio_device_id == kAudioObjectUnknown)
- return NULL;
-
- return new AudioSynchronizedStream(this,
- params,
- audio_device_id,
- kAudioDeviceUnknown);
+ AudioOutputStream* stream = new AUHALStream(this, params, device);
+ output_streams_.push_back(stream);
+ return stream;
}
std::string AudioManagerMac::GetDefaultOutputDeviceID() {
@@ -612,7 +612,9 @@ std::string AudioManagerMac::GetDefaultOutputDeviceID() {
AudioInputStream* AudioManagerMac::MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) {
DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
- return new PCMQueueInAudioInputStream(this, params);
+ AudioInputStream* stream = new PCMQueueInAudioInputStream(this, params);
+ input_streams_.push_back(stream);
+ return stream;
}
AudioInputStream* AudioManagerMac::MakeLowLatencyInputStream(
@@ -635,6 +637,7 @@ AudioInputStream* AudioManagerMac::MakeLowLatencyInputStream(
params);
stream = new AUAudioInputStream(this, params, output_params,
audio_device_id);
+ input_streams_.push_back(stream);
}
return stream;
@@ -643,81 +646,87 @@ AudioInputStream* AudioManagerMac::MakeLowLatencyInputStream(
AudioParameters AudioManagerMac::GetPreferredOutputStreamParameters(
const std::string& output_device_id,
const AudioParameters& input_params) {
- AudioDeviceID device = GetAudioDeviceIdByUId(false, output_device_id);
+ const AudioDeviceID device = GetAudioDeviceIdByUId(false, output_device_id);
if (device == kAudioObjectUnknown) {
DLOG(ERROR) << "Invalid output device " << output_device_id;
- return AudioParameters();
+ return input_params.IsValid() ? input_params : AudioParameters(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
+ kFallbackSampleRate, 16, ChooseBufferSize(kFallbackSampleRate));
+ }
+
+ const bool has_valid_input_params = input_params.IsValid();
+ const int hardware_sample_rate = HardwareSampleRateForDevice(device);
+
+ // Allow pass-through buffer sizes. If concurrent input and output streams
+ // exist, they will use the smallest buffer size amongst them. As such, each
+ // stream must be able to FIFO requests appropriately when this happens.
+ int buffer_size = ChooseBufferSize(hardware_sample_rate);
+ if (has_valid_input_params) {
+ buffer_size =
+ std::min(kMaximumInputOutputBufferSize,
+ std::max(input_params.frames_per_buffer(), buffer_size));
}
- int hardware_channels = 2;
+ int hardware_channels;
if (!GetDeviceChannels(device, kAudioDevicePropertyScopeOutput,
&hardware_channels)) {
- // Fallback to stereo.
hardware_channels = 2;
}
- ChannelLayout channel_layout = GuessChannelLayout(hardware_channels);
+ // Use the input channel count and channel layout if possible. Let OSX take
+ // care of remapping the channels; this lets user-specified channel layouts
+ // work correctly.
+ int output_channels = input_params.channels();
+ ChannelLayout channel_layout = input_params.channel_layout();
+ if (!has_valid_input_params || output_channels > hardware_channels) {
+ output_channels = hardware_channels;
+ channel_layout = GuessChannelLayout(output_channels);
+ if (channel_layout == CHANNEL_LAYOUT_UNSUPPORTED)
+ channel_layout = CHANNEL_LAYOUT_DISCRETE;
+ }
- const int hardware_sample_rate = HardwareSampleRateForDevice(device);
- const int buffer_size = ChooseBufferSize(hardware_sample_rate);
-
- int input_channels = 0;
- if (input_params.IsValid()) {
- input_channels = input_params.input_channels();
-
- if (input_channels > 0) {
- // TODO(xians): given the limitations of the AudioOutputStream
- // back-ends used with synchronized I/O, we hard-code to stereo.
- // Specifically, this is a limitation of AudioSynchronizedStream which
- // can be removed as part of the work to consolidate these back-ends.
- channel_layout = CHANNEL_LAYOUT_STEREO;
- }
+ const int input_channels =
+ has_valid_input_params ? input_params.input_channels() : 0;
+ if (input_channels > 0) {
+ // TODO(xians): given the limitations of the AudioOutputStream
+ // back-ends used with synchronized I/O, we hard-code to stereo.
+ // Specifically, this is a limitation of AudioSynchronizedStream which
+ // can be removed as part of the work to consolidate these back-ends.
+ channel_layout = CHANNEL_LAYOUT_STEREO;
}
- if (channel_layout == CHANNEL_LAYOUT_UNSUPPORTED)
- channel_layout = CHANNEL_LAYOUT_DISCRETE;
- else
- hardware_channels = ChannelLayoutToChannelCount(channel_layout);
-
- AudioParameters params(
- AudioParameters::AUDIO_PCM_LOW_LATENCY,
- channel_layout,
- hardware_channels,
- input_channels,
- hardware_sample_rate,
- 16,
- buffer_size,
+ return AudioParameters(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout, output_channels,
+ input_channels, hardware_sample_rate, 16, buffer_size,
AudioParameters::NO_EFFECTS);
-
- return params;
}
-void AudioManagerMac::CreateDeviceListener() {
- DCHECK(GetMessageLoop()->BelongsToCurrentThread());
-
- // Get a baseline for the sample-rate and current device,
- // so we can intelligently handle device notifications only when necessary.
- current_sample_rate_ = HardwareSampleRate();
- if (!GetDefaultOutputDevice(&current_output_device_))
- current_output_device_ = kAudioDeviceUnknown;
-
- output_device_listener_.reset(new AudioDeviceListenerMac(base::Bind(
- &AudioManagerMac::HandleDeviceChanges, base::Unretained(this))));
+void AudioManagerMac::InitializeOnAudioThread() {
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
+ power_observer_.reset(new AudioPowerObserver());
}
-void AudioManagerMac::DestroyDeviceListener() {
- DCHECK(GetMessageLoop()->BelongsToCurrentThread());
+void AudioManagerMac::ShutdownOnAudioThread() {
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
output_device_listener_.reset();
+ power_observer_.reset();
+
+ // Since CoreAudio calls have to run on the UI thread and browser shutdown
+ // doesn't wait for outstanding tasks to complete, we may have input/output
+ // streams still running at shutdown.
+ //
+ // To avoid calls into destructed classes, we need to stop the OS callbacks
+ // by stopping the streams. Note: The streams are leaked since process
+ // destruction is imminent.
+ //
+ // See http://crbug.com/354139 for crash details.
+ StopStreams(&input_streams_);
+ StopStreams(&output_streams_);
}
void AudioManagerMac::HandleDeviceChanges() {
- if (!GetMessageLoop()->BelongsToCurrentThread()) {
- GetMessageLoop()->PostTask(FROM_HERE, base::Bind(
- &AudioManagerMac::HandleDeviceChanges, base::Unretained(this)));
- return;
- }
-
- int new_sample_rate = HardwareSampleRate();
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
+ const int new_sample_rate = HardwareSampleRate();
AudioDeviceID new_output_device;
GetDefaultOutputDevice(&new_output_device);
@@ -731,7 +740,7 @@ void AudioManagerMac::HandleDeviceChanges() {
}
int AudioManagerMac::ChooseBufferSize(int output_sample_rate) {
- int buffer_size = kDefaultLowLatencyBufferSize;
+ int buffer_size = kMinimumInputOutputBufferSize;
const int user_buffer_size = GetUserBufferSize();
if (user_buffer_size) {
buffer_size = user_buffer_size;
@@ -739,14 +748,29 @@ int AudioManagerMac::ChooseBufferSize(int output_sample_rate) {
// The default buffer size is too small for higher sample rates and may lead
// to glitching. Adjust upwards by multiples of the default size.
if (output_sample_rate <= 96000)
- buffer_size = 2 * kDefaultLowLatencyBufferSize;
+ buffer_size = 2 * kMinimumInputOutputBufferSize;
else if (output_sample_rate <= 192000)
- buffer_size = 4 * kDefaultLowLatencyBufferSize;
+ buffer_size = 4 * kMinimumInputOutputBufferSize;
}
return buffer_size;
}
+bool AudioManagerMac::ShouldDeferStreamStart() {
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
+ return power_observer_->ShouldDeferStreamStart();
+}
+
+void AudioManagerMac::ReleaseOutputStream(AudioOutputStream* stream) {
+ output_streams_.remove(stream);
+ AudioManagerBase::ReleaseOutputStream(stream);
+}
+
+void AudioManagerMac::ReleaseInputStream(AudioInputStream* stream) {
+ input_streams_.remove(stream);
+ AudioManagerBase::ReleaseInputStream(stream);
+}
+
AudioManager* CreateAudioManager(AudioLogFactory* audio_log_factory) {
return new AudioManagerMac(audio_log_factory);
}
diff --git a/chromium/media/audio/mac/audio_manager_mac.h b/chromium/media/audio/mac/audio_manager_mac.h
index fb521c940de..490b0b6bbdd 100644
--- a/chromium/media/audio/mac/audio_manager_mac.h
+++ b/chromium/media/audio/mac/audio_manager_mac.h
@@ -6,13 +6,12 @@
#define MEDIA_AUDIO_MAC_AUDIO_MANAGER_MAC_H_
#include <CoreAudio/AudioHardware.h>
+#include <list>
#include <string>
#include "base/basictypes.h"
#include "base/compiler_specific.h"
-#include "base/message_loop/message_loop_proxy.h"
#include "media/audio/audio_manager_base.h"
-#include "media/audio/mac/aggregate_device_manager.h"
#include "media/audio/mac/audio_device_listener_mac.h"
namespace media {
@@ -41,14 +40,17 @@ class MEDIA_EXPORT AudioManagerMac : public AudioManagerBase {
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) OVERRIDE;
+ const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLowLatencyInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
virtual std::string GetDefaultOutputDeviceID() OVERRIDE;
+ // Used to track destruction of input and output streams.
+ virtual void ReleaseOutputStream(AudioOutputStream* stream) OVERRIDE;
+ virtual void ReleaseInputStream(AudioInputStream* stream) OVERRIDE;
+
static bool GetDefaultInputDevice(AudioDeviceID* device);
static bool GetDefaultOutputDevice(AudioDeviceID* device);
static bool GetDefaultDevice(AudioDeviceID* device, bool input);
@@ -62,6 +64,17 @@ class MEDIA_EXPORT AudioManagerMac : public AudioManagerBase {
static int HardwareSampleRateForDevice(AudioDeviceID device_id);
static int HardwareSampleRate();
+ // OSX has issues with starting streams as the system goes into suspend and
+ // immediately after it wakes up from resume. See http://crbug.com/160920.
+ // As a workaround we delay Start() when it occurs after suspend and for a
+ // small amount of time after resume.
+ //
+ // Streams should consult ShouldDeferStreamStart() and, if true, check the
+ // value again after |kStartDelayInSecsForPowerEvents| seconds have elapsed.
+ // If false, the stream may be started immediately.
+ enum { kStartDelayInSecsForPowerEvents = 1 };
+ bool ShouldDeferStreamStart();
+
protected:
virtual ~AudioManagerMac();
@@ -70,11 +83,8 @@ class MEDIA_EXPORT AudioManagerMac : public AudioManagerBase {
const AudioParameters& input_params) OVERRIDE;
private:
- bool HasUnifiedDefaultIO();
-
- // Helper methods for constructing AudioDeviceListenerMac on the audio thread.
- void CreateDeviceListener();
- void DestroyDeviceListener();
+ void InitializeOnAudioThread();
+ void ShutdownOnAudioThread();
int ChooseBufferSize(int output_sample_rate);
@@ -89,7 +99,16 @@ class MEDIA_EXPORT AudioManagerMac : public AudioManagerBase {
int current_sample_rate_;
AudioDeviceID current_output_device_;
- AggregateDeviceManager aggregate_device_manager_;
+ // Helper class which monitors power events to determine if output streams
+ // should defer Start() calls. Required to work around an OSX bug. See
+ // http://crbug.com/160920 for more details.
+ class AudioPowerObserver;
+ scoped_ptr<AudioPowerObserver> power_observer_;
+
+ // Tracks all constructed input and output streams so they can be stopped at
+ // shutdown. See ShutdownOnAudioThread() for more details.
+ std::list<AudioInputStream*> input_streams_;
+ std::list<AudioOutputStream*> output_streams_;
DISALLOW_COPY_AND_ASSIGN(AudioManagerMac);
};
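For illustration only -- the following sketch is not part of this patch. It shows how an output stream created by AudioManagerMac might consume the new ShouldDeferStreamStart() hook; MyAUHALStream, |task_runner_|, and StartInternal() are hypothetical names invented here.

// Hedged sketch, assuming a stream class that holds a pointer to its
// AudioManagerMac (|manager_|) and the audio thread's task runner
// (|task_runner_|). The streams actually in the tree may differ.
void MyAUHALStream::Start(AudioSourceCallback* callback) {
  if (manager_->ShouldDeferStreamStart()) {
    // A power event was observed recently; retry after the suggested delay
    // instead of starting immediately (see http://crbug.com/160920).
    task_runner_->PostDelayedTask(
        FROM_HERE,
        base::Bind(&MyAUHALStream::Start, base::Unretained(this), callback),
        base::TimeDelta::FromSeconds(
            AudioManagerMac::kStartDelayInSecsForPowerEvents));
    return;
  }
  StartInternal(callback);  // Hypothetical helper that actually starts the AU.
}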
diff --git a/chromium/media/audio/mac/audio_synchronized_mac.cc b/chromium/media/audio/mac/audio_synchronized_mac.cc
deleted file mode 100644
index a9bc88e2bd3..00000000000
--- a/chromium/media/audio/mac/audio_synchronized_mac.cc
+++ /dev/null
@@ -1,976 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/mac/audio_synchronized_mac.h"
-
-#include <CoreServices/CoreServices.h>
-#include <algorithm>
-
-#include "base/basictypes.h"
-#include "base/debug/trace_event.h"
-#include "base/logging.h"
-#include "base/mac/mac_logging.h"
-#include "media/audio/mac/audio_manager_mac.h"
-#include "media/base/channel_mixer.h"
-
-namespace media {
-
-static const int kHardwareBufferSize = 128;
-static const int kFifoSize = 16384;
-
-// TODO(crogers): handle the non-stereo case.
-static const int kChannels = 2;
-
-// This value was determined empirically for minimum latency while still
-// guarding against FIFO under-runs.
-static const int kBaseTargetFifoFrames = 256 + 64;
-
-// If the input and output sample-rate don't match, then we need to maintain
-// an additional safety margin due to the callback timing jitter and the
-// varispeed buffering. This value was empirically tuned.
-static const int kAdditionalTargetFifoFrames = 128;
-
-static void ZeroBufferList(AudioBufferList* buffer_list) {
- for (size_t i = 0; i < buffer_list->mNumberBuffers; ++i)
- memset(buffer_list->mBuffers[i].mData,
- 0,
- buffer_list->mBuffers[i].mDataByteSize);
-}
-
-static void WrapBufferList(AudioBufferList* buffer_list,
- AudioBus* bus,
- int frames) {
- DCHECK(buffer_list);
- DCHECK(bus);
- int channels = bus->channels();
- int buffer_list_channels = buffer_list->mNumberBuffers;
-
- // Copy pointers from AudioBufferList.
- int source_idx = 0;
- for (int i = 0; i < channels; ++i) {
- bus->SetChannelData(
- i, static_cast<float*>(buffer_list->mBuffers[source_idx].mData));
-
- // It's ok to pass in a |buffer_list| with fewer channels, in which
- // case we just duplicate the last channel.
- if (source_idx < buffer_list_channels - 1)
- ++source_idx;
- }
-
- // Finally set the actual length.
- bus->set_frames(frames);
-}
-
-AudioSynchronizedStream::AudioSynchronizedStream(
- AudioManagerMac* manager,
- const AudioParameters& params,
- AudioDeviceID input_id,
- AudioDeviceID output_id)
- : manager_(manager),
- params_(params),
- input_sample_rate_(0),
- output_sample_rate_(0),
- input_id_(input_id),
- output_id_(output_id),
- input_buffer_list_(NULL),
- fifo_(kChannels, kFifoSize),
- target_fifo_frames_(kBaseTargetFifoFrames),
- average_delta_(0.0),
- fifo_rate_compensation_(1.0),
- input_unit_(0),
- varispeed_unit_(0),
- output_unit_(0),
- first_input_time_(-1),
- is_running_(false),
- hardware_buffer_size_(kHardwareBufferSize),
- channels_(kChannels) {
- VLOG(1) << "AudioSynchronizedStream::AudioSynchronizedStream()";
-}
-
-AudioSynchronizedStream::~AudioSynchronizedStream() {
- DCHECK(!input_unit_);
- DCHECK(!output_unit_);
- DCHECK(!varispeed_unit_);
-}
-
-bool AudioSynchronizedStream::Open() {
- if (params_.channels() != kChannels) {
- LOG(ERROR) << "Only stereo output is currently supported.";
- return false;
- }
-
- // Create the input, output, and varispeed AudioUnits.
- OSStatus result = CreateAudioUnits();
- if (result != noErr) {
- LOG(ERROR) << "Cannot create AudioUnits.";
- return false;
- }
-
- result = SetupInput(input_id_);
- if (result != noErr) {
- LOG(ERROR) << "Error configuring input AudioUnit.";
- return false;
- }
-
- result = SetupOutput(output_id_);
- if (result != noErr) {
- LOG(ERROR) << "Error configuring output AudioUnit.";
- return false;
- }
-
- result = SetupCallbacks();
- if (result != noErr) {
- LOG(ERROR) << "Error setting up callbacks on AudioUnits.";
- return false;
- }
-
- result = SetupStreamFormats();
- if (result != noErr) {
- LOG(ERROR) << "Error configuring stream formats on AudioUnits.";
- return false;
- }
-
- AllocateInputData();
-
- // Final initialization of the AudioUnits.
- result = AudioUnitInitialize(input_unit_);
- if (result != noErr) {
- LOG(ERROR) << "Error initializing input AudioUnit.";
- return false;
- }
-
- result = AudioUnitInitialize(output_unit_);
- if (result != noErr) {
- LOG(ERROR) << "Error initializing output AudioUnit.";
- return false;
- }
-
- result = AudioUnitInitialize(varispeed_unit_);
- if (result != noErr) {
- LOG(ERROR) << "Error initializing varispeed AudioUnit.";
- return false;
- }
-
- if (input_sample_rate_ != output_sample_rate_) {
- // Add extra safety margin.
- target_fifo_frames_ += kAdditionalTargetFifoFrames;
- }
-
- // Buffer initial silence corresponding to target I/O buffering.
- fifo_.Clear();
- scoped_ptr<AudioBus> silence =
- AudioBus::Create(channels_, target_fifo_frames_);
- silence->Zero();
- fifo_.Push(silence.get());
-
- return true;
-}
-
-void AudioSynchronizedStream::Close() {
- DCHECK(!is_running_);
-
- if (input_buffer_list_) {
- free(input_buffer_list_);
- input_buffer_list_ = 0;
- input_bus_.reset(NULL);
- wrapper_bus_.reset(NULL);
- }
-
- if (input_unit_) {
- AudioUnitUninitialize(input_unit_);
- CloseComponent(input_unit_);
- }
-
- if (output_unit_) {
- AudioUnitUninitialize(output_unit_);
- CloseComponent(output_unit_);
- }
-
- if (varispeed_unit_) {
- AudioUnitUninitialize(varispeed_unit_);
- CloseComponent(varispeed_unit_);
- }
-
- input_unit_ = NULL;
- output_unit_ = NULL;
- varispeed_unit_ = NULL;
-
- // Inform the audio manager that we have been closed. This can cause our
- // destruction.
- manager_->ReleaseOutputStream(this);
-}
-
-void AudioSynchronizedStream::Start(AudioSourceCallback* callback) {
- DCHECK(callback);
- DCHECK(input_unit_);
- DCHECK(output_unit_);
- DCHECK(varispeed_unit_);
-
- if (is_running_ || !input_unit_ || !output_unit_ || !varispeed_unit_)
- return;
-
- source_ = callback;
-
- // Reset state variables each time we Start().
- fifo_rate_compensation_ = 1.0;
- average_delta_ = 0.0;
-
- OSStatus result = noErr;
-
- if (!is_running_) {
- first_input_time_ = -1;
-
- result = AudioOutputUnitStart(input_unit_);
- OSSTATUS_DCHECK(result == noErr, result);
-
- if (result == noErr) {
- result = AudioOutputUnitStart(output_unit_);
- OSSTATUS_DCHECK(result == noErr, result);
- }
- }
-
- is_running_ = true;
-}
-
-void AudioSynchronizedStream::Stop() {
- OSStatus result = noErr;
- if (is_running_) {
- result = AudioOutputUnitStop(input_unit_);
- OSSTATUS_DCHECK(result == noErr, result);
-
- if (result == noErr) {
- result = AudioOutputUnitStop(output_unit_);
- OSSTATUS_DCHECK(result == noErr, result);
- }
- }
-
- if (result == noErr)
- is_running_ = false;
-}
-
-bool AudioSynchronizedStream::IsRunning() {
- return is_running_;
-}
-
-// TODO(crogers): implement - or remove from AudioOutputStream.
-void AudioSynchronizedStream::SetVolume(double volume) {}
-void AudioSynchronizedStream::GetVolume(double* volume) {}
-
-OSStatus AudioSynchronizedStream::SetOutputDeviceAsCurrent(
- AudioDeviceID output_id) {
- OSStatus result = noErr;
-
- // Get the default output device if device is unknown.
- if (output_id == kAudioDeviceUnknown) {
- AudioObjectPropertyAddress pa;
- pa.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
- pa.mScope = kAudioObjectPropertyScopeGlobal;
- pa.mElement = kAudioObjectPropertyElementMaster;
- UInt32 size = sizeof(output_id);
-
- result = AudioObjectGetPropertyData(
- kAudioObjectSystemObject,
- &pa,
- 0,
- 0,
- &size,
- &output_id);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
- }
-
- // Set the render frame size.
- UInt32 frame_size = hardware_buffer_size_;
- AudioObjectPropertyAddress pa;
- pa.mSelector = kAudioDevicePropertyBufferFrameSize;
- pa.mScope = kAudioDevicePropertyScopeInput;
- pa.mElement = kAudioObjectPropertyElementMaster;
- result = AudioObjectSetPropertyData(
- output_id,
- &pa,
- 0,
- 0,
- sizeof(frame_size),
- &frame_size);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- output_info_.Initialize(output_id, false);
-
- // Set the Current Device to the Default Output Unit.
- result = AudioUnitSetProperty(
- output_unit_,
- kAudioOutputUnitProperty_CurrentDevice,
- kAudioUnitScope_Global,
- 0,
- &output_info_.id_,
- sizeof(output_info_.id_));
-
- OSSTATUS_DCHECK(result == noErr, result);
- return result;
-}
-
-OSStatus AudioSynchronizedStream::SetInputDeviceAsCurrent(
- AudioDeviceID input_id) {
- OSStatus result = noErr;
-
- // Get the default input device if device is unknown.
- if (input_id == kAudioDeviceUnknown) {
- AudioObjectPropertyAddress pa;
- pa.mSelector = kAudioHardwarePropertyDefaultInputDevice;
- pa.mScope = kAudioObjectPropertyScopeGlobal;
- pa.mElement = kAudioObjectPropertyElementMaster;
- UInt32 size = sizeof(input_id);
-
- result = AudioObjectGetPropertyData(
- kAudioObjectSystemObject,
- &pa,
- 0,
- 0,
- &size,
- &input_id);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
- }
-
- // Set the render frame size.
- UInt32 frame_size = hardware_buffer_size_;
- AudioObjectPropertyAddress pa;
- pa.mSelector = kAudioDevicePropertyBufferFrameSize;
- pa.mScope = kAudioDevicePropertyScopeInput;
- pa.mElement = kAudioObjectPropertyElementMaster;
- result = AudioObjectSetPropertyData(
- input_id,
- &pa,
- 0,
- 0,
- sizeof(frame_size),
- &frame_size);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- input_info_.Initialize(input_id, true);
-
- // Set the Current Device to the AUHAL.
- // This should be done only after I/O has been enabled on the AUHAL.
- result = AudioUnitSetProperty(
- input_unit_,
- kAudioOutputUnitProperty_CurrentDevice,
- kAudioUnitScope_Global,
- 0,
- &input_info_.id_,
- sizeof(input_info_.id_));
-
- OSSTATUS_DCHECK(result == noErr, result);
- return result;
-}
-
-OSStatus AudioSynchronizedStream::CreateAudioUnits() {
- // Q: Why do we need a varispeed unit?
- // A: If the input device and the output device are running at
- // different sample rates and/or on different clocks, we will need
- // to compensate to avoid a pitch change and
- // to avoid buffer under- and over-runs.
- ComponentDescription varispeed_desc;
- varispeed_desc.componentType = kAudioUnitType_FormatConverter;
- varispeed_desc.componentSubType = kAudioUnitSubType_Varispeed;
- varispeed_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
- varispeed_desc.componentFlags = 0;
- varispeed_desc.componentFlagsMask = 0;
-
- Component varispeed_comp = FindNextComponent(NULL, &varispeed_desc);
- if (varispeed_comp == NULL)
- return -1;
-
- OSStatus result = OpenAComponent(varispeed_comp, &varispeed_unit_);
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // Open input AudioUnit.
- ComponentDescription input_desc;
- input_desc.componentType = kAudioUnitType_Output;
- input_desc.componentSubType = kAudioUnitSubType_HALOutput;
- input_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
- input_desc.componentFlags = 0;
- input_desc.componentFlagsMask = 0;
-
- Component input_comp = FindNextComponent(NULL, &input_desc);
- if (input_comp == NULL)
- return -1;
-
- result = OpenAComponent(input_comp, &input_unit_);
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // Open output AudioUnit.
- ComponentDescription output_desc;
- output_desc.componentType = kAudioUnitType_Output;
- output_desc.componentSubType = kAudioUnitSubType_DefaultOutput;
- output_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
- output_desc.componentFlags = 0;
- output_desc.componentFlagsMask = 0;
-
- Component output_comp = FindNextComponent(NULL, &output_desc);
- if (output_comp == NULL)
- return -1;
-
- result = OpenAComponent(output_comp, &output_unit_);
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- return noErr;
-}
-
-OSStatus AudioSynchronizedStream::SetupInput(AudioDeviceID input_id) {
- // The AUHAL used for input needs to be initialized
- // before anything is done to it.
- OSStatus result = AudioUnitInitialize(input_unit_);
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // We must enable the Audio Unit (AUHAL) for input and disable output
- // BEFORE setting the AUHAL's current device.
- result = EnableIO();
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- result = SetInputDeviceAsCurrent(input_id);
- OSSTATUS_DCHECK(result == noErr, result);
-
- return result;
-}
-
-OSStatus AudioSynchronizedStream::EnableIO() {
- // Enable input on the AUHAL.
- UInt32 enable_io = 1;
- OSStatus result = AudioUnitSetProperty(
- input_unit_,
- kAudioOutputUnitProperty_EnableIO,
- kAudioUnitScope_Input,
- 1, // input element
- &enable_io,
- sizeof(enable_io));
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // Disable Output on the AUHAL.
- enable_io = 0;
- result = AudioUnitSetProperty(
- input_unit_,
- kAudioOutputUnitProperty_EnableIO,
- kAudioUnitScope_Output,
- 0, // output element
- &enable_io,
- sizeof(enable_io));
-
- OSSTATUS_DCHECK(result == noErr, result);
- return result;
-}
-
-OSStatus AudioSynchronizedStream::SetupOutput(AudioDeviceID output_id) {
- OSStatus result = noErr;
-
- result = SetOutputDeviceAsCurrent(output_id);
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // Tell the output unit not to reset timestamps.
- // Otherwise sample rate changes will cause sync loss.
- UInt32 start_at_zero = 0;
- result = AudioUnitSetProperty(
- output_unit_,
- kAudioOutputUnitProperty_StartTimestampsAtZero,
- kAudioUnitScope_Global,
- 0,
- &start_at_zero,
- sizeof(start_at_zero));
-
- OSSTATUS_DCHECK(result == noErr, result);
-
- return result;
-}
-
-OSStatus AudioSynchronizedStream::SetupCallbacks() {
- // Set the input callback.
- AURenderCallbackStruct callback;
- callback.inputProc = InputProc;
- callback.inputProcRefCon = this;
- OSStatus result = AudioUnitSetProperty(
- input_unit_,
- kAudioOutputUnitProperty_SetInputCallback,
- kAudioUnitScope_Global,
- 0,
- &callback,
- sizeof(callback));
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // Set the output callback.
- callback.inputProc = OutputProc;
- callback.inputProcRefCon = this;
- result = AudioUnitSetProperty(
- output_unit_,
- kAudioUnitProperty_SetRenderCallback,
- kAudioUnitScope_Input,
- 0,
- &callback,
- sizeof(callback));
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // Set the varispeed callback.
- callback.inputProc = VarispeedProc;
- callback.inputProcRefCon = this;
- result = AudioUnitSetProperty(
- varispeed_unit_,
- kAudioUnitProperty_SetRenderCallback,
- kAudioUnitScope_Input,
- 0,
- &callback,
- sizeof(callback));
-
- OSSTATUS_DCHECK(result == noErr, result);
-
- return result;
-}
-
-OSStatus AudioSynchronizedStream::SetupStreamFormats() {
- AudioStreamBasicDescription asbd, asbd_dev1_in, asbd_dev2_out;
-
- // Get the Stream Format (Output client side).
- UInt32 property_size = sizeof(asbd_dev1_in);
- OSStatus result = AudioUnitGetProperty(
- input_unit_,
- kAudioUnitProperty_StreamFormat,
- kAudioUnitScope_Input,
- 1,
- &asbd_dev1_in,
- &property_size);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // Get the Stream Format (client side).
- property_size = sizeof(asbd);
- result = AudioUnitGetProperty(
- input_unit_,
- kAudioUnitProperty_StreamFormat,
- kAudioUnitScope_Output,
- 1,
- &asbd,
- &property_size);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // Get the Stream Format (Output client side).
- property_size = sizeof(asbd_dev2_out);
- result = AudioUnitGetProperty(
- output_unit_,
- kAudioUnitProperty_StreamFormat,
- kAudioUnitScope_Output,
- 0,
- &asbd_dev2_out,
- &property_size);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // Set the format of all the AUs to the input/output devices channel count.
- // For a simple case, you want to set this to the lower of the channel
- // counts of the input device and the output device.
- asbd.mChannelsPerFrame = std::min(asbd_dev1_in.mChannelsPerFrame,
- asbd_dev2_out.mChannelsPerFrame);
-
- // We must get the sample rate of the input device and set it to the
- // stream format of AUHAL.
- Float64 rate = 0;
- property_size = sizeof(rate);
-
- AudioObjectPropertyAddress pa;
- pa.mSelector = kAudioDevicePropertyNominalSampleRate;
- pa.mScope = kAudioObjectPropertyScopeWildcard;
- pa.mElement = kAudioObjectPropertyElementMaster;
- result = AudioObjectGetPropertyData(
- input_info_.id_,
- &pa,
- 0,
- 0,
- &property_size,
- &rate);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- input_sample_rate_ = rate;
-
- asbd.mSampleRate = rate;
- property_size = sizeof(asbd);
-
- // Set the new formats to the AUs...
- result = AudioUnitSetProperty(
- input_unit_,
- kAudioUnitProperty_StreamFormat,
- kAudioUnitScope_Output,
- 1,
- &asbd,
- property_size);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- result = AudioUnitSetProperty(
- varispeed_unit_,
- kAudioUnitProperty_StreamFormat,
- kAudioUnitScope_Input,
- 0,
- &asbd,
- property_size);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // Set the correct sample rate for the output device,
- // but keep the channel count the same.
- property_size = sizeof(rate);
-
- pa.mSelector = kAudioDevicePropertyNominalSampleRate;
- pa.mScope = kAudioObjectPropertyScopeWildcard;
- pa.mElement = kAudioObjectPropertyElementMaster;
- result = AudioObjectGetPropertyData(
- output_info_.id_,
- &pa,
- 0,
- 0,
- &property_size,
- &rate);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- output_sample_rate_ = rate;
-
- // The requested sample-rate must match the hardware sample-rate.
- if (output_sample_rate_ != params_.sample_rate()) {
- LOG(ERROR) << "Requested sample-rate: " << params_.sample_rate()
- << " must match the hardware sample-rate: " << output_sample_rate_;
- return kAudioDeviceUnsupportedFormatError;
- }
-
- asbd.mSampleRate = rate;
- property_size = sizeof(asbd);
-
- // Set the new audio stream formats for the rest of the AUs...
- result = AudioUnitSetProperty(
- varispeed_unit_,
- kAudioUnitProperty_StreamFormat,
- kAudioUnitScope_Output,
- 0,
- &asbd,
- property_size);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- result = AudioUnitSetProperty(
- output_unit_,
- kAudioUnitProperty_StreamFormat,
- kAudioUnitScope_Input,
- 0,
- &asbd,
- property_size);
-
- OSSTATUS_DCHECK(result == noErr, result);
- return result;
-}
-
-void AudioSynchronizedStream::AllocateInputData() {
- // Get the native number of input channels that the hardware supports.
- int hardware_channels = 0;
- bool got_hardware_channels = AudioManagerMac::GetDeviceChannels(
- input_id_, kAudioDevicePropertyScopeInput, &hardware_channels);
- if (!got_hardware_channels || hardware_channels > 2) {
- // Only mono and stereo are supported on the input side. If we fail to get
- // the native channel count, or the native channel count is greater than 2,
- // we open the device in stereo mode.
- hardware_channels = 2;
- }
-
- // Allocate storage for the AudioBufferList used for the
- // input data from the input AudioUnit.
- // We allocate enough space for one AudioBuffer per channel.
- size_t malloc_size = offsetof(AudioBufferList, mBuffers[0]) +
- (sizeof(AudioBuffer) * hardware_channels);
-
- input_buffer_list_ = static_cast<AudioBufferList*>(malloc(malloc_size));
- input_buffer_list_->mNumberBuffers = hardware_channels;
-
- input_bus_ = AudioBus::Create(hardware_channels, hardware_buffer_size_);
- wrapper_bus_ = AudioBus::CreateWrapper(channels_);
- if (hardware_channels != params_.input_channels()) {
- ChannelLayout hardware_channel_layout =
- GuessChannelLayout(hardware_channels);
- ChannelLayout requested_channel_layout =
- GuessChannelLayout(params_.input_channels());
- channel_mixer_.reset(new ChannelMixer(hardware_channel_layout,
- requested_channel_layout));
- mixer_bus_ = AudioBus::Create(params_.input_channels(),
- hardware_buffer_size_);
- }
-
- // Allocate buffers for AudioBufferList.
- UInt32 buffer_size_bytes = input_bus_->frames() * sizeof(Float32);
- for (size_t i = 0; i < input_buffer_list_->mNumberBuffers; ++i) {
- input_buffer_list_->mBuffers[i].mNumberChannels = 1;
- input_buffer_list_->mBuffers[i].mDataByteSize = buffer_size_bytes;
- input_buffer_list_->mBuffers[i].mData = input_bus_->channel(i);
- }
-}
-
-OSStatus AudioSynchronizedStream::HandleInputCallback(
- AudioUnitRenderActionFlags* io_action_flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data) {
- TRACE_EVENT0("audio", "AudioSynchronizedStream::HandleInputCallback");
-
- if (first_input_time_ < 0.0)
- first_input_time_ = time_stamp->mSampleTime;
-
- // Get the new audio input data.
- OSStatus result = AudioUnitRender(
- input_unit_,
- io_action_flags,
- time_stamp,
- bus_number,
- number_of_frames,
- input_buffer_list_);
-
- // TODO(xians): Add back the DCHECK after synchronize IO supports all
- // combination of input and output params. See http://issue/246521.
- if (result != noErr)
- return result;
-
- // Buffer input into FIFO.
- int available_frames = fifo_.max_frames() - fifo_.frames();
- if (input_bus_->frames() <= available_frames) {
- if (channel_mixer_) {
- channel_mixer_->Transform(input_bus_.get(), mixer_bus_.get());
- fifo_.Push(mixer_bus_.get());
- } else {
- fifo_.Push(input_bus_.get());
- }
- }
-
- return result;
-}
-
-OSStatus AudioSynchronizedStream::HandleVarispeedCallback(
- AudioUnitRenderActionFlags* io_action_flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data) {
- // Create a wrapper bus on the AudioBufferList.
- WrapBufferList(io_data, wrapper_bus_.get(), number_of_frames);
-
- if (fifo_.frames() < static_cast<int>(number_of_frames)) {
- // We don't DCHECK here, since this is a possible run-time condition
- // if the machine is bogged down.
- wrapper_bus_->Zero();
- return noErr;
- }
-
- // Read from the FIFO to feed the varispeed.
- fifo_.Consume(wrapper_bus_.get(), 0, number_of_frames);
-
- return noErr;
-}
-
-OSStatus AudioSynchronizedStream::HandleOutputCallback(
- AudioUnitRenderActionFlags* io_action_flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data) {
- // Input callback hasn't run yet or we've suddenly changed sample-rates
- // -> silence.
- if (first_input_time_ < 0.0 ||
- static_cast<int>(number_of_frames) != params_.frames_per_buffer()) {
- ZeroBufferList(io_data);
- return noErr;
- }
-
- // Use the varispeed playback rate to offset small discrepancies
- // in hardware clocks, and also any differences in sample-rate
- // between input and output devices.
-
- // Calculate a varispeed rate scalar factor to compensate for drift between
- // input and output. We use the actual number of frames still in the FIFO
- // compared with the ideal value of |target_fifo_frames_|.
- int delta = fifo_.frames() - target_fifo_frames_;
-
- // Average |delta| because it can jitter back/forth quite frequently
- // by +/- the hardware buffer-size *if* the input and output callbacks are
- // happening at almost exactly the same time. Also, if the input and output
- // sample-rates are different then |delta| will jitter quite a bit due to
- // the rate conversion happening in the varispeed, plus the jittering of
- // the callbacks. The average value is what's important here.
- average_delta_ += (delta - average_delta_) * 0.1;
-
- // Compute a rate compensation which always attracts us back to the
- // |target_fifo_frames_| over a period of kCorrectionTimeSeconds.
- const double kCorrectionTimeSeconds = 0.1;
- double correction_time_frames = kCorrectionTimeSeconds * output_sample_rate_;
- fifo_rate_compensation_ =
- (correction_time_frames + average_delta_) / correction_time_frames;
-
- // Adjust for FIFO drift.
- OSStatus result = AudioUnitSetParameter(
- varispeed_unit_,
- kVarispeedParam_PlaybackRate,
- kAudioUnitScope_Global,
- 0,
- fifo_rate_compensation_,
- 0);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // Render to the output using the varispeed.
- result = AudioUnitRender(
- varispeed_unit_,
- io_action_flags,
- time_stamp,
- 0,
- number_of_frames,
- io_data);
-
- OSSTATUS_DCHECK(result == noErr, result);
- if (result != noErr)
- return result;
-
- // Create a wrapper bus on the AudioBufferList.
- WrapBufferList(io_data, wrapper_bus_.get(), number_of_frames);
-
- // Process in-place!
- source_->OnMoreIOData(wrapper_bus_.get(),
- wrapper_bus_.get(),
- AudioBuffersState(0, 0));
-
- return noErr;
-}
-
-OSStatus AudioSynchronizedStream::InputProc(
- void* user_data,
- AudioUnitRenderActionFlags* io_action_flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data) {
- AudioSynchronizedStream* stream =
- static_cast<AudioSynchronizedStream*>(user_data);
- DCHECK(stream);
-
- return stream->HandleInputCallback(
- io_action_flags,
- time_stamp,
- bus_number,
- number_of_frames,
- io_data);
-}
-
-OSStatus AudioSynchronizedStream::VarispeedProc(
- void* user_data,
- AudioUnitRenderActionFlags* io_action_flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data) {
- AudioSynchronizedStream* stream =
- static_cast<AudioSynchronizedStream*>(user_data);
- DCHECK(stream);
-
- return stream->HandleVarispeedCallback(
- io_action_flags,
- time_stamp,
- bus_number,
- number_of_frames,
- io_data);
-}
-
-OSStatus AudioSynchronizedStream::OutputProc(
- void* user_data,
- AudioUnitRenderActionFlags* io_action_flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data) {
- AudioSynchronizedStream* stream =
- static_cast<AudioSynchronizedStream*>(user_data);
- DCHECK(stream);
-
- return stream->HandleOutputCallback(
- io_action_flags,
- time_stamp,
- bus_number,
- number_of_frames,
- io_data);
-}
-
-void AudioSynchronizedStream::AudioDeviceInfo::Initialize(
- AudioDeviceID id, bool is_input) {
- id_ = id;
- is_input_ = is_input;
- if (id_ == kAudioDeviceUnknown)
- return;
-
- UInt32 property_size = sizeof(buffer_size_frames_);
-
- AudioObjectPropertyAddress pa;
- pa.mSelector = kAudioDevicePropertyBufferFrameSize;
- pa.mScope = kAudioObjectPropertyScopeWildcard;
- pa.mElement = kAudioObjectPropertyElementMaster;
- OSStatus result = AudioObjectGetPropertyData(
- id_,
- &pa,
- 0,
- 0,
- &property_size,
- &buffer_size_frames_);
-
- OSSTATUS_DCHECK(result == noErr, result);
-}
-
-} // namespace media
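For readability, the FIFO drift compensation computed inside the removed HandleOutputCallback() above can be restated in isolation. This free function and its parameter names are invented for illustration and do not exist in the tree; it only mirrors the deleted arithmetic.

// Sketch of the removed varispeed rate compensation. |fifo_frames| is the
// current FIFO depth, |target_frames| the ideal depth, and |average_delta|
// the running average carried between output callbacks.
double ComputeFifoRateCompensation(int fifo_frames,
                                   int target_frames,
                                   double output_sample_rate,
                                   double* average_delta) {
  const double kCorrectionTimeSeconds = 0.1;
  const int delta = fifo_frames - target_frames;
  // Low-pass filter the jittery instantaneous delta.
  *average_delta += (delta - *average_delta) * 0.1;
  const double correction_time_frames =
      kCorrectionTimeSeconds * output_sample_rate;
  // Values above 1.0 play faster and drain an over-full FIFO; values below
  // 1.0 play slower and let a starved FIFO refill.
  return (correction_time_frames + *average_delta) / correction_time_frames;
}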
diff --git a/chromium/media/audio/mac/audio_synchronized_mac.h b/chromium/media/audio/mac/audio_synchronized_mac.h
deleted file mode 100644
index a6db48e3037..00000000000
--- a/chromium/media/audio/mac/audio_synchronized_mac.h
+++ /dev/null
@@ -1,216 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_MAC_AUDIO_SYNCHRONIZED_MAC_H_
-#define MEDIA_AUDIO_MAC_AUDIO_SYNCHRONIZED_MAC_H_
-
-#include <AudioToolbox/AudioToolbox.h>
-#include <AudioUnit/AudioUnit.h>
-#include <CoreAudio/CoreAudio.h>
-
-#include "base/compiler_specific.h"
-#include "base/synchronization/lock.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_parameters.h"
-#include "media/base/audio_bus.h"
-#include "media/base/audio_fifo.h"
-
-namespace media {
-
-class AudioManagerMac;
-class ChannelMixer;
-
-// AudioSynchronizedStream allows arbitrary combinations of input and output
-// devices running off different clocks and using different drivers, with
-// potentially differing sample-rates. It implements AudioOutputStream
-// and shuttles its synchronized I/O data using AudioSourceCallback.
-//
-// It is required to first acquire the native sample rate of the selected
-// output device and then use the same rate when creating this object.
-//
-// ............................................................................
-// Theory of Operation:
-// .
-// INPUT THREAD . OUTPUT THREAD
-// +-----------------+ +------+ .
-// | Input AudioUnit | --> | | .
-// +-----------------+ | | .
-// | FIFO | .
-// | | +-----------+
-// | | -----> | Varispeed |
-// | | +-----------+
-// +------+ . |
-// . | +-----------+
-// . OnMoreIOData() --> | Output AU |
-// . +-----------+
-//
-// The input AudioUnit's InputProc is called on one thread which feeds the
-// FIFO. The output AudioUnit's OutputProc is called on a second thread
-// which pulls on the varispeed to get the current input data. The varispeed
-// handles mismatches between input and output sample-rate and also clock drift
-// between the input and output drivers. The varispeed consumes its data from
-// the FIFO and adjusts its rate dynamically according to the amount
-// of data buffered in the FIFO. If the FIFO starts getting too much data
-// buffered then the varispeed will speed up slightly to compensate
-// and similarly if the FIFO doesn't have enough data buffered then the
-// varispeed will slow down slightly.
-//
-// Finally, once the input data is available then OnMoreIOData() is called
-// which is given this input, and renders the output which is finally sent
-// to the Output AudioUnit.
-class AudioSynchronizedStream : public AudioOutputStream {
- public:
- // The ctor takes all the usual parameters, plus |manager| which is the
- // audio manager that is creating this object.
- AudioSynchronizedStream(AudioManagerMac* manager,
- const AudioParameters& params,
- AudioDeviceID input_id,
- AudioDeviceID output_id);
-
- virtual ~AudioSynchronizedStream();
-
- // Implementation of AudioOutputStream.
- virtual bool Open() OVERRIDE;
- virtual void Close() OVERRIDE;
- virtual void Start(AudioSourceCallback* callback) OVERRIDE;
- virtual void Stop() OVERRIDE;
-
- virtual void SetVolume(double volume) OVERRIDE;
- virtual void GetVolume(double* volume) OVERRIDE;
-
- OSStatus SetInputDeviceAsCurrent(AudioDeviceID input_id);
- OSStatus SetOutputDeviceAsCurrent(AudioDeviceID output_id);
- AudioDeviceID GetInputDeviceID() { return input_info_.id_; }
- AudioDeviceID GetOutputDeviceID() { return output_info_.id_; }
-
- bool IsRunning();
-
- private:
- // Initialization.
- OSStatus CreateAudioUnits();
- OSStatus SetupInput(AudioDeviceID input_id);
- OSStatus EnableIO();
- OSStatus SetupOutput(AudioDeviceID output_id);
- OSStatus SetupCallbacks();
- OSStatus SetupStreamFormats();
- void AllocateInputData();
-
- // Handlers for the AudioUnit callbacks.
- OSStatus HandleInputCallback(AudioUnitRenderActionFlags* io_action_flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data);
-
- OSStatus HandleVarispeedCallback(AudioUnitRenderActionFlags* io_action_flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data);
-
- OSStatus HandleOutputCallback(AudioUnitRenderActionFlags* io_action_flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data);
-
- // AudioUnit callbacks.
- static OSStatus InputProc(void* user_data,
- AudioUnitRenderActionFlags* io_action_flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data);
-
- static OSStatus VarispeedProc(void* user_data,
- AudioUnitRenderActionFlags* io_action_flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data);
-
- static OSStatus OutputProc(void* user_data,
- AudioUnitRenderActionFlags* io_action_flags,
- const AudioTimeStamp* time_stamp,
- UInt32 bus_number,
- UInt32 number_of_frames,
- AudioBufferList* io_data);
-
- // Our creator.
- AudioManagerMac* manager_;
-
- // Client parameters.
- AudioParameters params_;
-
- double input_sample_rate_;
- double output_sample_rate_;
-
- // Pointer to the object that will provide the audio samples.
- AudioSourceCallback* source_;
-
- // Values used in Open().
- AudioDeviceID input_id_;
- AudioDeviceID output_id_;
-
- // The input AudioUnit renders its data here.
- AudioBufferList* input_buffer_list_;
-
- // Holds the actual data for |input_buffer_list_|.
- scoped_ptr<AudioBus> input_bus_;
-
- // Used to overlay AudioBufferLists.
- scoped_ptr<AudioBus> wrapper_bus_;
-
- class AudioDeviceInfo {
- public:
- AudioDeviceInfo()
- : id_(kAudioDeviceUnknown),
- is_input_(false),
- buffer_size_frames_(0) {}
- void Initialize(AudioDeviceID inID, bool isInput);
- bool IsInitialized() const { return id_ != kAudioDeviceUnknown; }
-
- AudioDeviceID id_;
- bool is_input_;
- UInt32 buffer_size_frames_;
- };
-
- AudioDeviceInfo input_info_;
- AudioDeviceInfo output_info_;
-
- // Used for input to output buffering.
- AudioFifo fifo_;
-
- // The optimal number of frames we'd like to keep in the FIFO at all times.
- int target_fifo_frames_;
-
- // A running average of the measured delta between actual number of frames
- // in the FIFO versus |target_fifo_frames_|.
- double average_delta_;
-
- // A varispeed rate scalar which is calculated based on FIFO drift.
- double fifo_rate_compensation_;
-
- // AudioUnits.
- AudioUnit input_unit_;
- AudioUnit varispeed_unit_;
- AudioUnit output_unit_;
-
- double first_input_time_;
-
- bool is_running_;
- int hardware_buffer_size_;
- int channels_;
-
- // Channel mixer used to transform mono to stereo data. It is only created
- // if the input_hardware_channels is mono.
- scoped_ptr<ChannelMixer> channel_mixer_;
- scoped_ptr<AudioBus> mixer_bus_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioSynchronizedStream);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_MAC_AUDIO_SYNCHRONIZED_MAC_H_
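The producer/consumer hand-off pictured in the theory-of-operation diagram above can be illustrated on its own with media::AudioFifo. This is a hedged, self-contained sketch; the frame counts and the FifoHandOffExample() name are arbitrary and not taken from the deleted code.

#include "base/memory/scoped_ptr.h"
#include "media/base/audio_bus.h"
#include "media/base/audio_fifo.h"

void FifoHandOffExample() {
  const int kChannels = 2;
  media::AudioFifo fifo(kChannels, 16384);

  // Input-callback side: push captured frames only if the FIFO has room.
  scoped_ptr<media::AudioBus> captured =
      media::AudioBus::Create(kChannels, 128);
  captured->Zero();
  if (captured->frames() <= fifo.max_frames() - fifo.frames())
    fifo.Push(captured.get());

  // Varispeed/render side: consume what the output needs, or emit silence
  // when the FIFO has not buffered enough yet.
  scoped_ptr<media::AudioBus> rendered =
      media::AudioBus::Create(kChannels, 128);
  if (fifo.frames() >= rendered->frames())
    fifo.Consume(rendered.get(), 0, rendered->frames());
  else
    rendered->Zero();
}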
diff --git a/chromium/media/audio/mac/audio_unified_mac.cc b/chromium/media/audio/mac/audio_unified_mac.cc
deleted file mode 100644
index d1dc007e6a8..00000000000
--- a/chromium/media/audio/mac/audio_unified_mac.cc
+++ /dev/null
@@ -1,397 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/mac/audio_unified_mac.h"
-
-#include <CoreServices/CoreServices.h>
-
-#include "base/basictypes.h"
-#include "base/logging.h"
-#include "base/mac/mac_logging.h"
-#include "media/audio/mac/audio_manager_mac.h"
-
-namespace media {
-
-// TODO(crogers): support more than hard-coded stereo input.
-// Ideally we would like to receive this value as a constructor argument.
-static const int kDefaultInputChannels = 2;
-
-AudioHardwareUnifiedStream::AudioHardwareUnifiedStream(
- AudioManagerMac* manager, const AudioParameters& params)
- : manager_(manager),
- source_(NULL),
- client_input_channels_(kDefaultInputChannels),
- volume_(1.0f),
- input_channels_(0),
- output_channels_(0),
- input_channels_per_frame_(0),
- output_channels_per_frame_(0),
- io_proc_id_(0),
- device_(kAudioObjectUnknown),
- is_playing_(false) {
- DCHECK(manager_);
-
- // A frame is one sample across all channels. In interleaved audio the per
- // frame fields identify the set of n |channels|. In uncompressed audio, a
- // packet is always one frame.
- format_.mSampleRate = params.sample_rate();
- format_.mFormatID = kAudioFormatLinearPCM;
- format_.mFormatFlags = kLinearPCMFormatFlagIsPacked |
- kLinearPCMFormatFlagIsSignedInteger;
- format_.mBitsPerChannel = params.bits_per_sample();
- format_.mChannelsPerFrame = params.channels();
- format_.mFramesPerPacket = 1;
- format_.mBytesPerPacket = (format_.mBitsPerChannel * params.channels()) / 8;
- format_.mBytesPerFrame = format_.mBytesPerPacket;
- format_.mReserved = 0;
-
- // Calculate the number of sample frames per callback.
- number_of_frames_ = params.GetBytesPerBuffer() / format_.mBytesPerPacket;
-
- input_bus_ = AudioBus::Create(client_input_channels_,
- params.frames_per_buffer());
- output_bus_ = AudioBus::Create(params);
-}
-
-AudioHardwareUnifiedStream::~AudioHardwareUnifiedStream() {
- DCHECK_EQ(device_, kAudioObjectUnknown);
-}
-
-bool AudioHardwareUnifiedStream::Open() {
- // Obtain the current output device selected by the user.
- AudioObjectPropertyAddress pa;
- pa.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
- pa.mScope = kAudioObjectPropertyScopeGlobal;
- pa.mElement = kAudioObjectPropertyElementMaster;
-
- UInt32 size = sizeof(device_);
-
- OSStatus result = AudioObjectGetPropertyData(
- kAudioObjectSystemObject,
- &pa,
- 0,
- 0,
- &size,
- &device_);
-
- if ((result != kAudioHardwareNoError) || (device_ == kAudioDeviceUnknown)) {
- LOG(ERROR) << "Cannot open unified AudioDevice.";
- return false;
- }
-
- // The requested sample-rate must match the hardware sample-rate.
- Float64 sample_rate = 0.0;
- size = sizeof(sample_rate);
-
- pa.mSelector = kAudioDevicePropertyNominalSampleRate;
- pa.mScope = kAudioObjectPropertyScopeWildcard;
- pa.mElement = kAudioObjectPropertyElementMaster;
-
- result = AudioObjectGetPropertyData(
- device_,
- &pa,
- 0,
- 0,
- &size,
- &sample_rate);
-
- if (result != noErr || sample_rate != format_.mSampleRate) {
- LOG(ERROR) << "Requested sample-rate: " << format_.mSampleRate
- << " must match the hardware sample-rate: " << sample_rate;
- return false;
- }
-
- // Configure buffer frame size.
- UInt32 frame_size = number_of_frames_;
-
- pa.mSelector = kAudioDevicePropertyBufferFrameSize;
- pa.mScope = kAudioDevicePropertyScopeInput;
- pa.mElement = kAudioObjectPropertyElementMaster;
- result = AudioObjectSetPropertyData(
- device_,
- &pa,
- 0,
- 0,
- sizeof(frame_size),
- &frame_size);
-
- if (result != noErr) {
- LOG(ERROR) << "Unable to set input buffer frame size: " << frame_size;
- return false;
- }
-
- pa.mScope = kAudioDevicePropertyScopeOutput;
- result = AudioObjectSetPropertyData(
- device_,
- &pa,
- 0,
- 0,
- sizeof(frame_size),
- &frame_size);
-
- if (result != noErr) {
- LOG(ERROR) << "Unable to set output buffer frame size: " << frame_size;
- return false;
- }
-
- DVLOG(1) << "Sample rate: " << sample_rate;
- DVLOG(1) << "Frame size: " << frame_size;
-
- // Determine the number of input and output channels.
- // We handle both the interleaved and non-interleaved cases.
-
- // Get input stream configuration.
- pa.mSelector = kAudioDevicePropertyStreamConfiguration;
- pa.mScope = kAudioDevicePropertyScopeInput;
- pa.mElement = kAudioObjectPropertyElementMaster;
-
- result = AudioObjectGetPropertyDataSize(device_, &pa, 0, 0, &size);
- OSSTATUS_DCHECK(result == noErr, result);
-
- if (result == noErr && size > 0) {
- // Allocate storage.
- scoped_ptr<uint8[]> input_list_storage(new uint8[size]);
- AudioBufferList& input_list =
- *reinterpret_cast<AudioBufferList*>(input_list_storage.get());
-
- result = AudioObjectGetPropertyData(
- device_,
- &pa,
- 0,
- 0,
- &size,
- &input_list);
- OSSTATUS_DCHECK(result == noErr, result);
-
- if (result == noErr) {
- // Determine number of input channels.
- input_channels_per_frame_ = input_list.mNumberBuffers > 0 ?
- input_list.mBuffers[0].mNumberChannels : 0;
- if (input_channels_per_frame_ == 1 && input_list.mNumberBuffers > 1) {
- // Non-interleaved.
- input_channels_ = input_list.mNumberBuffers;
- } else {
- // Interleaved.
- input_channels_ = input_channels_per_frame_;
- }
- }
- }
-
- DVLOG(1) << "Input channels: " << input_channels_;
- DVLOG(1) << "Input channels per frame: " << input_channels_per_frame_;
-
- // The hardware must have at least the requested input channels.
- if (result != noErr || client_input_channels_ > input_channels_) {
- LOG(ERROR) << "AudioDevice does not support requested input channels.";
- return false;
- }
-
- // Get output stream configuration.
- pa.mSelector = kAudioDevicePropertyStreamConfiguration;
- pa.mScope = kAudioDevicePropertyScopeOutput;
- pa.mElement = kAudioObjectPropertyElementMaster;
-
- result = AudioObjectGetPropertyDataSize(device_, &pa, 0, 0, &size);
- OSSTATUS_DCHECK(result == noErr, result);
-
- if (result == noErr && size > 0) {
- // Allocate storage.
- scoped_ptr<uint8[]> output_list_storage(new uint8[size]);
- AudioBufferList& output_list =
- *reinterpret_cast<AudioBufferList*>(output_list_storage.get());
-
- result = AudioObjectGetPropertyData(
- device_,
- &pa,
- 0,
- 0,
- &size,
- &output_list);
- OSSTATUS_DCHECK(result == noErr, result);
-
- if (result == noErr) {
- // Determine number of output channels.
- output_channels_per_frame_ = output_list.mBuffers[0].mNumberChannels;
- if (output_channels_per_frame_ == 1 && output_list.mNumberBuffers > 1) {
- // Non-interleaved.
- output_channels_ = output_list.mNumberBuffers;
- } else {
- // Interleaved.
- output_channels_ = output_channels_per_frame_;
- }
- }
- }
-
- DVLOG(1) << "Output channels: " << output_channels_;
- DVLOG(1) << "Output channels per frame: " << output_channels_per_frame_;
-
- // The hardware must have at least the requested output channels.
- if (result != noErr ||
- output_channels_ < static_cast<int>(format_.mChannelsPerFrame)) {
- LOG(ERROR) << "AudioDevice does not support requested output channels.";
- return false;
- }
-
- // Setup the I/O proc.
- result = AudioDeviceCreateIOProcID(device_, RenderProc, this, &io_proc_id_);
- if (result != noErr) {
- LOG(ERROR) << "Error creating IOProc.";
- return false;
- }
-
- return true;
-}
-
-void AudioHardwareUnifiedStream::Close() {
- DCHECK(!is_playing_);
-
- OSStatus result = AudioDeviceDestroyIOProcID(device_, io_proc_id_);
- OSSTATUS_DCHECK(result == noErr, result);
-
- io_proc_id_ = 0;
- device_ = kAudioObjectUnknown;
-
- // Inform the audio manager that we have been closed. This can cause our
- // destruction.
- manager_->ReleaseOutputStream(this);
-}
-
-void AudioHardwareUnifiedStream::Start(AudioSourceCallback* callback) {
- DCHECK(callback);
- DCHECK_NE(device_, kAudioObjectUnknown);
- DCHECK(!is_playing_);
- if (device_ == kAudioObjectUnknown || is_playing_)
- return;
-
- source_ = callback;
-
- OSStatus result = AudioDeviceStart(device_, io_proc_id_);
- OSSTATUS_DCHECK(result == noErr, result);
-
- if (result == noErr)
- is_playing_ = true;
-}
-
-void AudioHardwareUnifiedStream::Stop() {
- if (!is_playing_)
- return;
-
- if (device_ != kAudioObjectUnknown) {
- OSStatus result = AudioDeviceStop(device_, io_proc_id_);
- OSSTATUS_DCHECK(result == noErr, result);
- }
-
- is_playing_ = false;
- source_ = NULL;
-}
-
-void AudioHardwareUnifiedStream::SetVolume(double volume) {
- volume_ = static_cast<float>(volume);
- // TODO(crogers): set volume property
-}
-
-void AudioHardwareUnifiedStream::GetVolume(double* volume) {
- *volume = volume_;
-}
-
-// Pulls on our provider with optional input, asking it to render output.
-// Note to future hackers of this function: Do not add locks here because this
-// is running on a real-time thread (for low-latency).
-OSStatus AudioHardwareUnifiedStream::Render(
- AudioDeviceID device,
- const AudioTimeStamp* now,
- const AudioBufferList* input_data,
- const AudioTimeStamp* input_time,
- AudioBufferList* output_data,
- const AudioTimeStamp* output_time) {
- // Convert the input data accounting for possible interleaving.
- // TODO(crogers): it's better to simply memcpy() if source is already planar.
- if (input_channels_ >= client_input_channels_) {
- for (int channel_index = 0; channel_index < client_input_channels_;
- ++channel_index) {
- float* source;
-
- int source_channel_index = channel_index;
-
- if (input_channels_per_frame_ > 1) {
- // Interleaved.
- source = static_cast<float*>(input_data->mBuffers[0].mData) +
- source_channel_index;
- } else {
- // Non-interleaved.
- source = static_cast<float*>(
- input_data->mBuffers[source_channel_index].mData);
- }
-
- float* p = input_bus_->channel(channel_index);
- for (int i = 0; i < number_of_frames_; ++i) {
- p[i] = *source;
- source += input_channels_per_frame_;
- }
- }
- } else if (input_channels_) {
- input_bus_->Zero();
- }
-
- // Give the client optional input data and have it render the output data.
- source_->OnMoreIOData(input_bus_.get(),
- output_bus_.get(),
- AudioBuffersState(0, 0));
-
- // TODO(crogers): handle final Core Audio 5.1 layout for 5.1 audio.
-
- // Handle interleaving as necessary.
- // TODO(crogers): it's better to simply memcpy() if dest is already planar.
-
- for (int channel_index = 0;
- channel_index < static_cast<int>(format_.mChannelsPerFrame);
- ++channel_index) {
- float* dest;
-
- int dest_channel_index = channel_index;
-
- if (output_channels_per_frame_ > 1) {
- // Interleaved.
- dest = static_cast<float*>(output_data->mBuffers[0].mData) +
- dest_channel_index;
- } else {
- // Non-interleaved.
- dest = static_cast<float*>(
- output_data->mBuffers[dest_channel_index].mData);
- }
-
- float* p = output_bus_->channel(channel_index);
- for (int i = 0; i < number_of_frames_; ++i) {
- *dest = p[i];
- dest += output_channels_per_frame_;
- }
- }
-
- return noErr;
-}
-
-OSStatus AudioHardwareUnifiedStream::RenderProc(
- AudioDeviceID device,
- const AudioTimeStamp* now,
- const AudioBufferList* input_data,
- const AudioTimeStamp* input_time,
- AudioBufferList* output_data,
- const AudioTimeStamp* output_time,
- void* user_data) {
- AudioHardwareUnifiedStream* audio_output =
- static_cast<AudioHardwareUnifiedStream*>(user_data);
- DCHECK(audio_output);
- if (!audio_output)
- return -1;
-
- return audio_output->Render(
- device,
- now,
- input_data,
- input_time,
- output_data,
- output_time);
-}
-
-} // namespace media
diff --git a/chromium/media/audio/mac/audio_unified_mac.h b/chromium/media/audio/mac/audio_unified_mac.h
deleted file mode 100644
index ff090e3be1a..00000000000
--- a/chromium/media/audio/mac/audio_unified_mac.h
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_MAC_AUDIO_UNIFIED_MAC_H_
-#define MEDIA_AUDIO_MAC_AUDIO_UNIFIED_MAC_H_
-
-#include <CoreAudio/CoreAudio.h>
-
-#include "base/memory/scoped_ptr.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_parameters.h"
-
-namespace media {
-
-class AudioManagerMac;
-
-// Implementation of AudioOutputStream for Mac OS X using the
-// CoreAudio AudioHardware API suitable for low-latency unified audio I/O
-// when using devices which support *both* input and output
-// in the same driver. This is the case with professional
-// USB and Firewire devices.
-//
-// Please note that it's required to first get the native sample-rate of the
-// default output device and use that sample-rate when creating this object.
-class AudioHardwareUnifiedStream : public AudioOutputStream {
- public:
- // The ctor takes all the usual parameters, plus |manager|, which is the
- // audio manager that is creating this object.
- AudioHardwareUnifiedStream(AudioManagerMac* manager,
- const AudioParameters& params);
- // The dtor is typically called by the AudioManager only and it is usually
- // triggered by calling AudioOutputStream::Close().
- virtual ~AudioHardwareUnifiedStream();
-
- // Implementation of AudioOutputStream.
- virtual bool Open() OVERRIDE;
- virtual void Close() OVERRIDE;
- virtual void Start(AudioSourceCallback* callback) OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void SetVolume(double volume) OVERRIDE;
- virtual void GetVolume(double* volume) OVERRIDE;
-
- int input_channels() const { return input_channels_; }
- int output_channels() const { return output_channels_; }
-
- private:
- OSStatus Render(AudioDeviceID device,
- const AudioTimeStamp* now,
- const AudioBufferList* input_data,
- const AudioTimeStamp* input_time,
- AudioBufferList* output_data,
- const AudioTimeStamp* output_time);
-
- static OSStatus RenderProc(AudioDeviceID device,
- const AudioTimeStamp* now,
- const AudioBufferList* input_data,
- const AudioTimeStamp* input_time,
- AudioBufferList* output_data,
- const AudioTimeStamp* output_time,
- void* user_data);
-
- // Our creator, the audio manager, needs to be notified when we close.
- AudioManagerMac* manager_;
-
- // Pointer to the object that will provide the audio samples.
- AudioSourceCallback* source_;
-
- // Structure that holds the stream format details such as bitrate.
- AudioStreamBasicDescription format_;
-
- // Hardware buffer size.
- int number_of_frames_;
-
- // Number of audio channels provided to the client via OnMoreIOData().
- int client_input_channels_;
-
- // Volume level from 0 to 1.
- float volume_;
-
- // Number of input and output channels queried from the hardware.
- int input_channels_;
- int output_channels_;
- int input_channels_per_frame_;
- int output_channels_per_frame_;
-
- AudioDeviceIOProcID io_proc_id_;
- AudioDeviceID device_;
- bool is_playing_;
-
- // Intermediate buffers used with call to OnMoreIOData().
- scoped_ptr<AudioBus> input_bus_;
- scoped_ptr<AudioBus> output_bus_;
-
- DISALLOW_COPY_AND_ASSIGN(AudioHardwareUnifiedStream);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_MAC_AUDIO_UNIFIED_MAC_H_
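The Render() routine removed above copied samples by hand between planar AudioBus channels and Core Audio's interleaved buffers. A minimal standalone sketch of that interleaving step, for reference only; the function and parameter names are illustrative and not part of the patch:

// Copies |frames| samples per channel from a planar source (one float array
// per channel) into a single interleaved destination, the same stride walk
// the deleted unified stream performed on its output path.
void InterleaveFloatChannels(const float* const* planar_channels,
                             int channels,
                             int frames,
                             float* interleaved_out) {
  for (int ch = 0; ch < channels; ++ch) {
    const float* src = planar_channels[ch];
    float* dest = interleaved_out + ch;  // First sample slot for this channel.
    for (int i = 0; i < frames; ++i) {
      *dest = src[i];
      dest += channels;  // Skip over the other channels' samples.
    }
  }
}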
diff --git a/chromium/media/audio/mock_audio_manager.cc b/chromium/media/audio/mock_audio_manager.cc
index f2074d65357..7183405a190 100644
--- a/chromium/media/audio/mock_audio_manager.cc
+++ b/chromium/media/audio/mock_audio_manager.cc
@@ -5,14 +5,14 @@
#include "media/audio/mock_audio_manager.h"
#include "base/logging.h"
-#include "base/message_loop/message_loop_proxy.h"
+#include "base/single_thread_task_runner.h"
#include "media/audio/audio_parameters.h"
namespace media {
-MockAudioManager::MockAudioManager(base::MessageLoopProxy* message_loop_proxy)
- : message_loop_proxy_(message_loop_proxy) {
-}
+MockAudioManager::MockAudioManager(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner)
+ : task_runner_(task_runner) {}
MockAudioManager::~MockAudioManager() {
}
@@ -34,11 +34,8 @@ void MockAudioManager::ShowAudioInputSettings() {
void MockAudioManager::GetAudioInputDeviceNames(
AudioDeviceNames* device_names) {
- DCHECK(device_names->empty());
- device_names->push_back(media::AudioDeviceName("fake_device_name_1",
- "fake_device_id_1"));
- device_names->push_back(media::AudioDeviceName("fake_device_name_2",
- "fake_device_id_2"));
+ // Do not inject fake devices here, use
+ // AudioInputDeviceManager::GetFakeDeviceNames() instead.
}
void MockAudioManager::GetAudioOutputDeviceNames(
@@ -47,16 +44,14 @@ void MockAudioManager::GetAudioOutputDeviceNames(
media::AudioOutputStream* MockAudioManager::MakeAudioOutputStream(
const media::AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) {
+ const std::string& device_id) {
NOTREACHED();
return NULL;
}
media::AudioOutputStream* MockAudioManager::MakeAudioOutputStreamProxy(
const media::AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) {
+ const std::string& device_id) {
NOTREACHED();
return NULL;
}
@@ -68,12 +63,13 @@ media::AudioInputStream* MockAudioManager::MakeAudioInputStream(
return NULL;
}
-scoped_refptr<base::MessageLoopProxy> MockAudioManager::GetMessageLoop() {
- return message_loop_proxy_;
+scoped_refptr<base::SingleThreadTaskRunner> MockAudioManager::GetTaskRunner() {
+ return task_runner_;
}
-scoped_refptr<base::MessageLoopProxy> MockAudioManager::GetWorkerLoop() {
- return message_loop_proxy_;
+scoped_refptr<base::SingleThreadTaskRunner>
+MockAudioManager::GetWorkerTaskRunner() {
+ return task_runner_;
}
void MockAudioManager::AddOutputDeviceChangeListener(
@@ -108,6 +104,4 @@ scoped_ptr<AudioLog> MockAudioManager::CreateAudioLog(
return scoped_ptr<AudioLog>();
}
-void MockAudioManager::FixWedgedAudio() {}
-
} // namespace media.
diff --git a/chromium/media/audio/mock_audio_manager.h b/chromium/media/audio/mock_audio_manager.h
index 2d71fe8493f..520205d21ba 100644
--- a/chromium/media/audio/mock_audio_manager.h
+++ b/chromium/media/audio/mock_audio_manager.h
@@ -21,7 +21,8 @@ namespace media {
// synchronization purposes).
class MockAudioManager : public media::AudioManager {
public:
- explicit MockAudioManager(base::MessageLoopProxy* message_loop_proxy);
+ explicit MockAudioManager(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner);
virtual bool HasAudioOutputDevices() OVERRIDE;
@@ -39,20 +40,19 @@ class MockAudioManager : public media::AudioManager {
virtual media::AudioOutputStream* MakeAudioOutputStream(
const media::AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) OVERRIDE;
+ const std::string& device_id) OVERRIDE;
virtual media::AudioOutputStream* MakeAudioOutputStreamProxy(
const media::AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) OVERRIDE;
+ const std::string& device_id) OVERRIDE;
virtual media::AudioInputStream* MakeAudioInputStream(
const media::AudioParameters& params,
const std::string& device_id) OVERRIDE;
- virtual scoped_refptr<base::MessageLoopProxy> GetMessageLoop() OVERRIDE;
- virtual scoped_refptr<base::MessageLoopProxy> GetWorkerLoop() OVERRIDE;
+ virtual scoped_refptr<base::SingleThreadTaskRunner> GetTaskRunner() OVERRIDE;
+ virtual scoped_refptr<base::SingleThreadTaskRunner> GetWorkerTaskRunner()
+ OVERRIDE;
virtual void AddOutputDeviceChangeListener(
AudioDeviceListener* listener) OVERRIDE;
@@ -70,13 +70,11 @@ class MockAudioManager : public media::AudioManager {
virtual scoped_ptr<AudioLog> CreateAudioLog(
AudioLogFactory::AudioComponent component) OVERRIDE;
- virtual void FixWedgedAudio() OVERRIDE;
-
protected:
virtual ~MockAudioManager();
private:
- scoped_refptr<base::MessageLoopProxy> message_loop_proxy_;
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
DISALLOW_COPY_AND_ASSIGN(MockAudioManager);
};
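With GetMessageLoop()/GetWorkerLoop() replaced by task-runner accessors, test code now hands the mock a SingleThreadTaskRunner rather than a MessageLoopProxy. A hypothetical snippet, assuming a test-owned base::MessageLoop; lifetime handling is elided since the destructor is protected:

#include "base/logging.h"
#include "base/message_loop/message_loop.h"
#include "media/audio/mock_audio_manager.h"

void BuildMockAudioManagerForTest() {
  base::MessageLoop message_loop;
  // MessageLoop::message_loop_proxy() returns a MessageLoopProxy, which is a
  // SingleThreadTaskRunner subclass, so it satisfies the new constructor.
  media::MockAudioManager* audio_manager =
      new media::MockAudioManager(message_loop.message_loop_proxy());
  // GetTaskRunner() and GetWorkerTaskRunner() now both return that runner.
  CHECK(audio_manager->GetTaskRunner()->BelongsToCurrentThread());
  // Teardown omitted in this sketch.
}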
diff --git a/chromium/media/audio/mock_audio_source_callback.cc b/chromium/media/audio/mock_audio_source_callback.cc
new file mode 100644
index 00000000000..da2be1cfa41
--- /dev/null
+++ b/chromium/media/audio/mock_audio_source_callback.cc
@@ -0,0 +1,12 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/mock_audio_source_callback.h"
+
+namespace media {
+
+MockAudioSourceCallback::MockAudioSourceCallback() {}
+MockAudioSourceCallback::~MockAudioSourceCallback() {}
+
+} // namespace media
diff --git a/chromium/media/audio/mock_audio_source_callback.h b/chromium/media/audio/mock_audio_source_callback.h
new file mode 100644
index 00000000000..d24ce44c5f8
--- /dev/null
+++ b/chromium/media/audio/mock_audio_source_callback.h
@@ -0,0 +1,28 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_MOCK_AUDIO_SOURCE_CALLBACK_H_
+#define MEDIA_AUDIO_MOCK_AUDIO_SOURCE_CALLBACK_H_
+
+#include "media/audio/audio_io.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace media {
+
+class MockAudioSourceCallback : public AudioOutputStream::AudioSourceCallback {
+ public:
+ MockAudioSourceCallback();
+ virtual ~MockAudioSourceCallback();
+
+ MOCK_METHOD2(OnMoreData, int(AudioBus* audio_bus,
+ AudioBuffersState buffers_state));
+ MOCK_METHOD1(OnError, void(AudioOutputStream* stream));
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockAudioSourceCallback);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_MOCK_AUDIO_SOURCE_CALLBACK_H_
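A hedged example of wiring the new mock into a gmock-based test; the test name is hypothetical and the stream under test is only sketched in a comment, but the two expectations match the MOCK_METHOD declarations in the header above:

#include "media/audio/mock_audio_source_callback.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"

using ::testing::_;
using ::testing::Return;

TEST(HypotheticalOutputStreamTest, PollsSourceAfterStart) {
  media::MockAudioSourceCallback source;
  // Returning 0 frames means "no data"; the stream should keep polling
  // without reporting an error.
  EXPECT_CALL(source, OnMoreData(_, _)).WillRepeatedly(Return(0));
  EXPECT_CALL(source, OnError(_)).Times(0);
  // stream->Start(&source); ... stream->Stop(); stream->Close();
}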
diff --git a/chromium/media/audio/null_audio_sink.cc b/chromium/media/audio/null_audio_sink.cc
index 607d7d861e2..dfd07fcee6a 100644
--- a/chromium/media/audio/null_audio_sink.cc
+++ b/chromium/media/audio/null_audio_sink.cc
@@ -5,18 +5,18 @@
#include "media/audio/null_audio_sink.h"
#include "base/bind.h"
-#include "base/message_loop/message_loop_proxy.h"
+#include "base/single_thread_task_runner.h"
#include "media/audio/fake_audio_consumer.h"
#include "media/base/audio_hash.h"
namespace media {
NullAudioSink::NullAudioSink(
- const scoped_refptr<base::MessageLoopProxy>& message_loop)
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner)
: initialized_(false),
playing_(false),
callback_(NULL),
- message_loop_(message_loop) {
+ task_runner_(task_runner) {
}
NullAudioSink::~NullAudioSink() {}
@@ -24,18 +24,18 @@ NullAudioSink::~NullAudioSink() {}
void NullAudioSink::Initialize(const AudioParameters& params,
RenderCallback* callback) {
DCHECK(!initialized_);
- fake_consumer_.reset(new FakeAudioConsumer(message_loop_, params));
+ fake_consumer_.reset(new FakeAudioConsumer(task_runner_, params));
callback_ = callback;
initialized_ = true;
}
void NullAudioSink::Start() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(!playing_);
}
void NullAudioSink::Stop() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
// Stop may be called at any time, so we have to check before stopping.
if (fake_consumer_)
@@ -43,7 +43,7 @@ void NullAudioSink::Stop() {
}
void NullAudioSink::Play() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
DCHECK(initialized_);
if (playing_)
@@ -55,7 +55,7 @@ void NullAudioSink::Play() {
}
void NullAudioSink::Pause() {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
if (!playing_)
return;
@@ -70,7 +70,7 @@ bool NullAudioSink::SetVolume(double volume) {
}
void NullAudioSink::CallRender(AudioBus* audio_bus) {
- DCHECK(message_loop_->BelongsToCurrentThread());
+ DCHECK(task_runner_->BelongsToCurrentThread());
int frames_received = callback_->Render(audio_bus, 0);
if (!audio_hash_ || frames_received <= 0)
diff --git a/chromium/media/audio/null_audio_sink.h b/chromium/media/audio/null_audio_sink.h
index 072414606ff..c28a2cfac06 100644
--- a/chromium/media/audio/null_audio_sink.h
+++ b/chromium/media/audio/null_audio_sink.h
@@ -11,7 +11,7 @@
#include "media/base/audio_renderer_sink.h"
namespace base {
-class MessageLoopProxy;
+class SingleThreadTaskRunner;
}
namespace media {
@@ -22,7 +22,7 @@ class FakeAudioConsumer;
class MEDIA_EXPORT NullAudioSink
: NON_EXPORTED_BASE(public AudioRendererSink) {
public:
- NullAudioSink(const scoped_refptr<base::MessageLoopProxy>& message_loop);
+ NullAudioSink(const scoped_refptr<base::SingleThreadTaskRunner>& task_runner);
// AudioRendererSink implementation.
virtual void Initialize(const AudioParameters& params,
@@ -53,7 +53,7 @@ class MEDIA_EXPORT NullAudioSink
// Controls whether or not a running hash is computed for audio frames.
scoped_ptr<AudioHash> audio_hash_;
- scoped_refptr<base::MessageLoopProxy> message_loop_;
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
scoped_ptr<FakeAudioConsumer> fake_consumer_;
DISALLOW_COPY_AND_ASSIGN(NullAudioSink);
diff --git a/chromium/media/audio/openbsd/audio_manager_openbsd.cc b/chromium/media/audio/openbsd/audio_manager_openbsd.cc
index b378b02d0cd..ec482d8b7cd 100644
--- a/chromium/media/audio/openbsd/audio_manager_openbsd.cc
+++ b/chromium/media/audio/openbsd/audio_manager_openbsd.cc
@@ -59,9 +59,13 @@ AudioParameters AudioManagerOpenBSD::GetInputStreamParameters(
const std::string& device_id) {
static const int kDefaultInputBufferSize = 1024;
+ int user_buffer_size = GetUserBufferSize();
+ int buffer_size = user_buffer_size ?
+ user_buffer_size : kDefaultInputBufferSize;
+
return AudioParameters(
AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
- kDefaultSampleRate, 16, kDefaultInputBufferSize);
+ kDefaultSampleRate, 16, buffer_size);
}
AudioManagerOpenBSD::AudioManagerOpenBSD(AudioLogFactory* audio_log_factory)
@@ -92,8 +96,7 @@ AudioOutputStream* AudioManagerOpenBSD::MakeLinearOutputStream(
AudioOutputStream* AudioManagerOpenBSD::MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) {
+ const std::string& device_id) {
DLOG_IF(ERROR, !device_id.empty()) << "Not implemented!";
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format);
return MakeOutputStream(params);
diff --git a/chromium/media/audio/openbsd/audio_manager_openbsd.h b/chromium/media/audio/openbsd/audio_manager_openbsd.h
index 113f5915ae1..53b7dfb725f 100644
--- a/chromium/media/audio/openbsd/audio_manager_openbsd.h
+++ b/chromium/media/audio/openbsd/audio_manager_openbsd.h
@@ -27,8 +27,7 @@ class MEDIA_EXPORT AudioManagerOpenBSD : public AudioManagerBase {
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) OVERRIDE;
+ const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLowLatencyInputStream(
diff --git a/chromium/media/audio/pulse/audio_manager_pulse.cc b/chromium/media/audio/pulse/audio_manager_pulse.cc
index d369d135bef..412f2a421b0 100644
--- a/chromium/media/audio/pulse/audio_manager_pulse.cc
+++ b/chromium/media/audio/pulse/audio_manager_pulse.cc
@@ -10,11 +10,12 @@
#include "base/logging.h"
#include "base/nix/xdg_util.h"
#include "base/stl_util.h"
+#if defined(USE_ALSA)
#include "media/audio/alsa/audio_manager_alsa.h"
+#endif
#include "media/audio/audio_parameters.h"
#include "media/audio/pulse/pulse_input.h"
#include "media/audio/pulse/pulse_output.h"
-#include "media/audio/pulse/pulse_unified.h"
#include "media/audio/pulse/pulse_util.h"
#include "media/base/channel_layout.h"
@@ -34,6 +35,13 @@ using pulse::WaitForOperationCompletion;
// Maximum number of output streams that can be open simultaneously.
static const int kMaxOutputStreams = 50;
+// Define bounds for the output buffer size.
+static const int kMinimumOutputBufferSize = 512;
+static const int kMaximumOutputBufferSize = 8192;
+
+// Default input buffer size.
+static const int kDefaultInputBufferSize = 1024;
+
static const base::FilePath::CharType kPulseLib[] =
FILE_PATH_LITERAL("libpulse.so.0");
@@ -78,7 +86,9 @@ bool AudioManagerPulse::HasAudioInputDevices() {
}
void AudioManagerPulse::ShowAudioInputSettings() {
+#if defined(USE_ALSA)
AudioManagerAlsa::ShowLinuxAudioInputSettings();
+#endif
}
void AudioManagerPulse::GetAudioDeviceNames(
@@ -118,27 +128,29 @@ void AudioManagerPulse::GetAudioOutputDeviceNames(
AudioParameters AudioManagerPulse::GetInputStreamParameters(
const std::string& device_id) {
- static const int kDefaultInputBufferSize = 1024;
+ int user_buffer_size = GetUserBufferSize();
+ int buffer_size = user_buffer_size ?
+ user_buffer_size : kDefaultInputBufferSize;
// TODO(xians): add support for querying native channel layout for pulse.
return AudioParameters(
AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
- GetNativeSampleRate(), 16, kDefaultInputBufferSize);
+ GetNativeSampleRate(), 16, buffer_size);
}
AudioOutputStream* AudioManagerPulse::MakeLinearOutputStream(
const AudioParameters& params) {
DCHECK_EQ(AudioParameters::AUDIO_PCM_LINEAR, params.format());
- return MakeOutputStream(params, std::string());
+ return MakeOutputStream(params, AudioManagerBase::kDefaultDeviceId);
}
AudioOutputStream* AudioManagerPulse::MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) {
- DLOG_IF(ERROR, !device_id.empty()) << "Not implemented!";
+ const std::string& device_id) {
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
- return MakeOutputStream(params, input_device_id);
+ return MakeOutputStream(
+ params,
+ device_id.empty() ? AudioManagerBase::kDefaultDeviceId : device_id);
}
AudioInputStream* AudioManagerPulse::MakeLinearInputStream(
@@ -157,11 +169,10 @@ AudioParameters AudioManagerPulse::GetPreferredOutputStreamParameters(
const std::string& output_device_id,
const AudioParameters& input_params) {
// TODO(tommi): Support |output_device_id|.
- DLOG_IF(ERROR, !output_device_id.empty()) << "Not implemented!";
- static const int kDefaultOutputBufferSize = 512;
+ VLOG_IF(0, !output_device_id.empty()) << "Not implemented!";
ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
- int buffer_size = kDefaultOutputBufferSize;
+ int buffer_size = kMinimumOutputBufferSize;
int bits_per_sample = 16;
int input_channels = 0;
int sample_rate;
@@ -169,7 +180,9 @@ AudioParameters AudioManagerPulse::GetPreferredOutputStreamParameters(
bits_per_sample = input_params.bits_per_sample();
channel_layout = input_params.channel_layout();
input_channels = input_params.input_channels();
- buffer_size = std::min(buffer_size, input_params.frames_per_buffer());
+ buffer_size =
+ std::min(kMaximumOutputBufferSize,
+ std::max(buffer_size, input_params.frames_per_buffer()));
sample_rate = input_params.sample_rate();
} else {
sample_rate = GetNativeSampleRate();
@@ -185,12 +198,10 @@ AudioParameters AudioManagerPulse::GetPreferredOutputStreamParameters(
}
AudioOutputStream* AudioManagerPulse::MakeOutputStream(
- const AudioParameters& params, const std::string& input_device_id) {
- if (params.input_channels()) {
- return new PulseAudioUnifiedStream(params, input_device_id, this);
- }
-
- return new PulseAudioOutputStream(params, this);
+ const AudioParameters& params,
+ const std::string& device_id) {
+ DCHECK(!device_id.empty());
+ return new PulseAudioOutputStream(params, device_id, this);
}
AudioInputStream* AudioManagerPulse::MakeInputStream(
@@ -219,7 +230,7 @@ bool AudioManagerPulse::Init() {
// Check if the pulse library is available.
paths[kModulePulse].push_back(kPulseLib);
if (!InitializeStubs(paths)) {
- DLOG(WARNING) << "Failed on loading the Pulse library and symbols";
+ VLOG(1) << "Failed on loading the Pulse library and symbols";
return false;
}
#endif // defined(DLOPEN_PULSEAUDIO)
@@ -247,8 +258,8 @@ bool AudioManagerPulse::Init() {
pa_context_set_state_callback(input_context_, &pulse::ContextStateCallback,
input_mainloop_);
if (pa_context_connect(input_context_, NULL, PA_CONTEXT_NOAUTOSPAWN, NULL)) {
- DLOG(ERROR) << "Failed to connect to the context. Error: "
- << pa_strerror(pa_context_errno(input_context_));
+ VLOG(0) << "Failed to connect to the context. Error: "
+ << pa_strerror(pa_context_errno(input_context_));
return false;
}
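The two new constants replace the old fixed 512-frame output buffer: the caller's frames_per_buffer() is now clamped into [512, 8192] in GetPreferredOutputStreamParameters(). A standalone sketch of that arithmetic (the helper name is illustrative):

#include <algorithm>

// Mirrors the std::min/std::max expression added above.
int ClampPulseOutputBufferSize(int requested_frames) {
  static const int kMinimumOutputBufferSize = 512;
  static const int kMaximumOutputBufferSize = 8192;
  return std::min(kMaximumOutputBufferSize,
                  std::max(kMinimumOutputBufferSize, requested_frames));
}
// Examples: 256 -> 512, 2048 -> 2048, 16384 -> 8192.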
diff --git a/chromium/media/audio/pulse/audio_manager_pulse.h b/chromium/media/audio/pulse/audio_manager_pulse.h
index 45fb8cb56fa..150ea51a3e9 100644
--- a/chromium/media/audio/pulse/audio_manager_pulse.h
+++ b/chromium/media/audio/pulse/audio_manager_pulse.h
@@ -37,8 +37,7 @@ class MEDIA_EXPORT AudioManagerPulse : public AudioManagerBase {
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) OVERRIDE;
+ const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLowLatencyInputStream(
@@ -71,7 +70,7 @@ class MEDIA_EXPORT AudioManagerPulse : public AudioManagerBase {
// Called by MakeLinearOutputStream and MakeLowLatencyOutputStream.
AudioOutputStream* MakeOutputStream(const AudioParameters& params,
- const std::string& input_device_id);
+ const std::string& device_id);
// Called by MakeLinearInputStream and MakeLowLatencyInputStream.
AudioInputStream* MakeInputStream(const AudioParameters& params,
diff --git a/chromium/media/audio/pulse/pulse.sigs b/chromium/media/audio/pulse/pulse.sigs
index b5d927c754c..8d2dab70c60 100644
--- a/chromium/media/audio/pulse/pulse.sigs
+++ b/chromium/media/audio/pulse/pulse.sigs
@@ -1,10 +1,10 @@
-# Copyright 2013 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
-#------------------------------------------------
-# Functions from pulse used in media code.
-#------------------------------------------------
+//------------------------------------------------
+// Functions from pulse used in media code.
+//------------------------------------------------
pa_mainloop_api* pa_threaded_mainloop_get_api(pa_threaded_mainloop* m);
void pa_threaded_mainloop_free(pa_threaded_mainloop* m);
pa_threaded_mainloop* pa_threaded_mainloop_new();
diff --git a/chromium/media/audio/pulse/pulse_input.cc b/chromium/media/audio/pulse/pulse_input.cc
index 54dfc1e05ab..d5cb94ece22 100644
--- a/chromium/media/audio/pulse/pulse_input.cc
+++ b/chromium/media/audio/pulse/pulse_input.cc
@@ -34,6 +34,8 @@ PulseAudioInputStream::PulseAudioInputStream(AudioManagerPulse* audio_manager,
context_state_changed_(false) {
DCHECK(mainloop);
DCHECK(context);
+ CHECK(params_.IsValid());
+ audio_bus_ = AudioBus::Create(params_);
}
PulseAudioInputStream::~PulseAudioInputStream() {
@@ -105,6 +107,7 @@ void PulseAudioInputStream::Stop() {
operation = pa_stream_cork(handle_, 1, &pulse::StreamSuccessCallback,
pa_mainloop_);
WaitForOperationCompletion(pa_mainloop_, operation);
+ callback_ = NULL;
}
void PulseAudioInputStream::Close() {
@@ -125,9 +128,6 @@ void PulseAudioInputStream::Close() {
}
}
- if (callback_)
- callback_->OnClose(this);
-
// Signal to the manager that we're closed and can be removed.
// This should be the last call in the function as it deletes "this".
audio_manager_->ReleaseInputStream(this);
@@ -274,8 +274,11 @@ void PulseAudioInputStream::ReadData() {
int packet_size = params_.GetBytesPerBuffer();
while (buffer_->forward_bytes() >= packet_size) {
buffer_->Read(audio_data_buffer_.get(), packet_size);
- callback_->OnData(this, audio_data_buffer_.get(), packet_size,
- hardware_delay, normalized_volume);
+ audio_bus_->FromInterleaved(audio_data_buffer_.get(),
+ audio_bus_->frames(),
+ params_.bits_per_sample() / 8);
+ callback_->OnData(
+ this, audio_bus_.get(), hardware_delay, normalized_volume);
if (buffer_->forward_bytes() < packet_size)
break;
diff --git a/chromium/media/audio/pulse/pulse_input.h b/chromium/media/audio/pulse/pulse_input.h
index 7566eacf10b..7e64bb296a8 100644
--- a/chromium/media/audio/pulse/pulse_input.h
+++ b/chromium/media/audio/pulse/pulse_input.h
@@ -75,6 +75,8 @@ class PulseAudioInputStream : public AgcAudioStream<AudioInputStream> {
// Flag indicating the state of the context has been changed.
bool context_state_changed_;
+ scoped_ptr<AudioBus> audio_bus_;
+
base::ThreadChecker thread_checker_;
DISALLOW_COPY_AND_ASSIGN(PulseAudioInputStream);
diff --git a/chromium/media/audio/pulse/pulse_output.cc b/chromium/media/audio/pulse/pulse_output.cc
index c40d4f65051..19fc47b8be5 100644
--- a/chromium/media/audio/pulse/pulse_output.cc
+++ b/chromium/media/audio/pulse/pulse_output.cc
@@ -6,7 +6,7 @@
#include <pulse/pulseaudio.h>
-#include "base/message_loop/message_loop.h"
+#include "base/single_thread_task_runner.h"
#include "media/audio/audio_manager_base.h"
#include "media/audio/audio_parameters.h"
#include "media/audio/pulse/pulse_util.h"
@@ -39,16 +39,16 @@ void PulseAudioOutputStream::StreamRequestCallback(pa_stream* s, size_t len,
}
PulseAudioOutputStream::PulseAudioOutputStream(const AudioParameters& params,
+ const std::string& device_id,
AudioManagerBase* manager)
: params_(params),
+ device_id_(device_id),
manager_(manager),
pa_context_(NULL),
pa_mainloop_(NULL),
pa_stream_(NULL),
volume_(1.0f),
source_callback_(NULL) {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
-
CHECK(params_.IsValid());
audio_bus_ = AudioBus::Create(params_);
}
@@ -62,9 +62,9 @@ PulseAudioOutputStream::~PulseAudioOutputStream() {
}
bool PulseAudioOutputStream::Open() {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
+ DCHECK(thread_checker_.CalledOnValidThread());
return pulse::CreateOutputStream(&pa_mainloop_, &pa_context_, &pa_stream_,
- params_, &StreamNotifyCallback,
+ params_, device_id_, &StreamNotifyCallback,
&StreamRequestCallback, this);
}
@@ -107,7 +107,7 @@ void PulseAudioOutputStream::Reset() {
}
void PulseAudioOutputStream::Close() {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
+ DCHECK(thread_checker_.CalledOnValidThread());
Reset();
@@ -157,7 +157,7 @@ void PulseAudioOutputStream::FulfillWriteRequest(size_t requested_bytes) {
}
void PulseAudioOutputStream::Start(AudioSourceCallback* callback) {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
+ DCHECK(thread_checker_.CalledOnValidThread());
CHECK(callback);
CHECK(pa_stream_);
@@ -179,7 +179,7 @@ void PulseAudioOutputStream::Start(AudioSourceCallback* callback) {
}
void PulseAudioOutputStream::Stop() {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
+ DCHECK(thread_checker_.CalledOnValidThread());
// Cork (pause) the stream. Waiting for the main loop lock will ensure
// outstanding callbacks have completed.
@@ -202,13 +202,13 @@ void PulseAudioOutputStream::Stop() {
}
void PulseAudioOutputStream::SetVolume(double volume) {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
+ DCHECK(thread_checker_.CalledOnValidThread());
volume_ = static_cast<float>(volume);
}
void PulseAudioOutputStream::GetVolume(double* volume) {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
+ DCHECK(thread_checker_.CalledOnValidThread());
*volume = volume_;
}
diff --git a/chromium/media/audio/pulse/pulse_output.h b/chromium/media/audio/pulse/pulse_output.h
index 583cce7e5bd..e1c00455563 100644
--- a/chromium/media/audio/pulse/pulse_output.h
+++ b/chromium/media/audio/pulse/pulse_output.h
@@ -20,7 +20,10 @@
#ifndef MEDIA_AUDIO_PULSE_PULSE_OUTPUT_H_
#define MEDIA_AUDIO_PULSE_PULSE_OUTPUT_H_
+#include <string>
+
#include "base/memory/scoped_ptr.h"
+#include "base/threading/thread_checker.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_parameters.h"
@@ -35,6 +38,7 @@ class AudioManagerBase;
class PulseAudioOutputStream : public AudioOutputStream {
public:
PulseAudioOutputStream(const AudioParameters& params,
+ const std::string& device_id,
AudioManagerBase* manager);
virtual ~PulseAudioOutputStream();
@@ -66,6 +70,9 @@ class PulseAudioOutputStream : public AudioOutputStream {
// AudioParameters from the constructor.
const AudioParameters params_;
+ // The device ID for the device to open.
+ const std::string device_id_;
+
// Audio manager that created us. Used to report that we've closed.
AudioManagerBase* manager_;
@@ -84,6 +91,8 @@ class PulseAudioOutputStream : public AudioOutputStream {
// Container for retrieving data from AudioSourceCallback::OnMoreData().
scoped_ptr<AudioBus> audio_bus_;
+ base::ThreadChecker thread_checker_;
+
DISALLOW_COPY_AND_ASSIGN(PulseAudioOutputStream);
};
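The DCHECKs above now rely on base::ThreadChecker instead of asking the manager for its message loop. A minimal sketch of that pattern in isolation (the class and method names are made up for illustration):

#include "base/logging.h"
#include "base/threading/thread_checker.h"

// The checker binds to the thread that constructs the object; every public
// method then asserts it runs on that same thread, with no AudioManager
// involvement.
class SingleThreadBound {
 public:
  SingleThreadBound() {}

  void DoWork() {
    DCHECK(thread_checker_.CalledOnValidThread());
    // ... work that must stay on the construction thread ...
  }

 private:
  base::ThreadChecker thread_checker_;
};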
diff --git a/chromium/media/audio/pulse/pulse_unified.cc b/chromium/media/audio/pulse/pulse_unified.cc
deleted file mode 100644
index c68a797469f..00000000000
--- a/chromium/media/audio/pulse/pulse_unified.cc
+++ /dev/null
@@ -1,292 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/pulse/pulse_unified.h"
-
-#include "base/message_loop/message_loop.h"
-#include "base/time/time.h"
-#include "media/audio/audio_manager_base.h"
-#include "media/audio/audio_parameters.h"
-#include "media/audio/pulse/pulse_util.h"
-#include "media/base/seekable_buffer.h"
-
-namespace media {
-
-using pulse::AutoPulseLock;
-using pulse::WaitForOperationCompletion;
-
-static const int kFifoSizeInPackets = 10;
-
-// static, pa_stream_notify_cb
-void PulseAudioUnifiedStream::StreamNotifyCallback(pa_stream* s,
- void* user_data) {
- PulseAudioUnifiedStream* stream =
- static_cast<PulseAudioUnifiedStream*>(user_data);
-
- // Forward unexpected failures to the AudioSourceCallback if available. All
- // these variables are only modified under pa_threaded_mainloop_lock() so this
- // should be thread safe.
- if (s && stream->source_callback_ &&
- pa_stream_get_state(s) == PA_STREAM_FAILED) {
- stream->source_callback_->OnError(stream);
- }
-
- pa_threaded_mainloop_signal(stream->pa_mainloop_, 0);
-}
-
-// static, used by pa_stream_set_read_callback.
-void PulseAudioUnifiedStream::ReadCallback(pa_stream* handle, size_t length,
- void* user_data) {
- static_cast<PulseAudioUnifiedStream*>(user_data)->ReadData();
-}
-
-PulseAudioUnifiedStream::PulseAudioUnifiedStream(
- const AudioParameters& params,
- const std::string& input_device_id,
- AudioManagerBase* manager)
- : params_(params),
- input_device_id_(input_device_id),
- manager_(manager),
- pa_context_(NULL),
- pa_mainloop_(NULL),
- input_stream_(NULL),
- output_stream_(NULL),
- volume_(1.0f),
- source_callback_(NULL) {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
- CHECK(params_.IsValid());
- input_bus_ = AudioBus::Create(params_);
- output_bus_ = AudioBus::Create(params_);
-}
-
-PulseAudioUnifiedStream::~PulseAudioUnifiedStream() {
- // All internal structures should already have been freed in Close(), which
- // calls AudioManagerBase::ReleaseOutputStream() which deletes this object.
- DCHECK(!input_stream_);
- DCHECK(!output_stream_);
- DCHECK(!pa_context_);
- DCHECK(!pa_mainloop_);
-}
-
-bool PulseAudioUnifiedStream::Open() {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
- // Prepare the recording buffers for the callbacks.
- fifo_.reset(new media::SeekableBuffer(
- 0, kFifoSizeInPackets * params_.GetBytesPerBuffer()));
- input_data_buffer_.reset(new uint8[params_.GetBytesPerBuffer()]);
-
- if (!pulse::CreateOutputStream(&pa_mainloop_, &pa_context_, &output_stream_,
- params_, &StreamNotifyCallback, NULL, this))
- return false;
-
- if (!pulse::CreateInputStream(pa_mainloop_, pa_context_, &input_stream_,
- params_, input_device_id_,
- &StreamNotifyCallback, this))
- return false;
-
- DCHECK(pa_mainloop_);
- DCHECK(pa_context_);
- DCHECK(input_stream_);
- DCHECK(output_stream_);
- return true;
-}
-
-void PulseAudioUnifiedStream::Reset() {
- if (!pa_mainloop_) {
- DCHECK(!input_stream_);
- DCHECK(!output_stream_);
- DCHECK(!pa_context_);
- return;
- }
-
- {
- AutoPulseLock auto_lock(pa_mainloop_);
-
- // Close the input stream.
- if (input_stream_) {
- // Disable all the callbacks before disconnecting.
- pa_stream_set_state_callback(input_stream_, NULL, NULL);
- pa_stream_flush(input_stream_, NULL, NULL);
- pa_stream_disconnect(input_stream_);
-
- // Release PulseAudio structures.
- pa_stream_unref(input_stream_);
- input_stream_ = NULL;
- }
-
- // Close the output stream.
- if (output_stream_) {
- // Release PulseAudio output stream structures.
- pa_stream_set_state_callback(output_stream_, NULL, NULL);
- pa_stream_disconnect(output_stream_);
- pa_stream_unref(output_stream_);
- output_stream_ = NULL;
- }
-
- if (pa_context_) {
- pa_context_disconnect(pa_context_);
- pa_context_set_state_callback(pa_context_, NULL, NULL);
- pa_context_unref(pa_context_);
- pa_context_ = NULL;
- }
- }
-
- pa_threaded_mainloop_stop(pa_mainloop_);
- pa_threaded_mainloop_free(pa_mainloop_);
- pa_mainloop_ = NULL;
-}
-
-void PulseAudioUnifiedStream::Close() {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
- Reset();
-
- // Signal to the manager that we're closed and can be removed.
- // This should be the last call in the function as it deletes "this".
- manager_->ReleaseOutputStream(this);
-}
-
-void PulseAudioUnifiedStream::WriteData(size_t requested_bytes) {
- CHECK_EQ(requested_bytes, static_cast<size_t>(params_.GetBytesPerBuffer()));
-
- void* buffer = NULL;
- int frames_filled = 0;
- if (source_callback_) {
- CHECK_GE(pa_stream_begin_write(
- output_stream_, &buffer, &requested_bytes), 0);
- uint32 hardware_delay = pulse::GetHardwareLatencyInBytes(
- output_stream_, params_.sample_rate(),
- params_.GetBytesPerFrame());
- fifo_->Read(input_data_buffer_.get(), requested_bytes);
- input_bus_->FromInterleaved(
- input_data_buffer_.get(), params_.frames_per_buffer(), 2);
-
- frames_filled = source_callback_->OnMoreIOData(
- input_bus_.get(),
- output_bus_.get(),
- AudioBuffersState(0, hardware_delay));
- }
-
- // Zero the unfilled data so it plays back as silence.
- if (frames_filled < output_bus_->frames()) {
- output_bus_->ZeroFramesPartial(
- frames_filled, output_bus_->frames() - frames_filled);
- }
-
- // Note: If this ever changes to output raw float the data must be clipped
- // and sanitized since it may come from an untrusted source such as NaCl.
- output_bus_->Scale(volume_);
- output_bus_->ToInterleaved(
- output_bus_->frames(), params_.bits_per_sample() / 8, buffer);
-
- if (pa_stream_write(output_stream_, buffer, requested_bytes, NULL, 0LL,
- PA_SEEK_RELATIVE) < 0) {
- if (source_callback_) {
- source_callback_->OnError(this);
- }
- }
-}
-
-void PulseAudioUnifiedStream::ReadData() {
- do {
- size_t length = 0;
- const void* data = NULL;
- pa_stream_peek(input_stream_, &data, &length);
- if (!data || length == 0)
- break;
-
- fifo_->Append(reinterpret_cast<const uint8*>(data), length);
-
- // Deliver the recording data to the renderer and drive the playout.
- int packet_size = params_.GetBytesPerBuffer();
- while (fifo_->forward_bytes() >= packet_size) {
- WriteData(packet_size);
- }
-
- // Checks if we still have data.
- pa_stream_drop(input_stream_);
- } while (pa_stream_readable_size(input_stream_) > 0);
-
- pa_threaded_mainloop_signal(pa_mainloop_, 0);
-}
-
-void PulseAudioUnifiedStream::Start(AudioSourceCallback* callback) {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
- CHECK(callback);
- CHECK(input_stream_);
- CHECK(output_stream_);
- AutoPulseLock auto_lock(pa_mainloop_);
-
- // Ensure the context and stream are ready.
- if (pa_context_get_state(pa_context_) != PA_CONTEXT_READY &&
- pa_stream_get_state(output_stream_) != PA_STREAM_READY &&
- pa_stream_get_state(input_stream_) != PA_STREAM_READY) {
- callback->OnError(this);
- return;
- }
-
- source_callback_ = callback;
-
- fifo_->Clear();
-
- // Uncork (resume) the input stream.
- pa_stream_set_read_callback(input_stream_, &ReadCallback, this);
- pa_stream_readable_size(input_stream_);
- pa_operation* operation = pa_stream_cork(input_stream_, 0, NULL, NULL);
- WaitForOperationCompletion(pa_mainloop_, operation);
-
- // Uncork (resume) the output stream.
- // We use the recording stream to drive the playback, so we do not need to
- // register the write callback using pa_stream_set_write_callback().
- operation = pa_stream_cork(output_stream_, 0,
- &pulse::StreamSuccessCallback, pa_mainloop_);
- WaitForOperationCompletion(pa_mainloop_, operation);
-}
-
-void PulseAudioUnifiedStream::Stop() {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
-
- // Cork (pause) the stream. Waiting for the main loop lock will ensure
- // outstanding callbacks have completed.
- AutoPulseLock auto_lock(pa_mainloop_);
-
- // Set |source_callback_| to NULL so all FulfillWriteRequest() calls which may
- // occur while waiting on the flush and cork exit immediately.
- source_callback_ = NULL;
-
- // Set the read callback to NULL before flushing the stream, otherwise it
- // will cause deadlock on the operation.
- pa_stream_set_read_callback(input_stream_, NULL, NULL);
- pa_operation* operation = pa_stream_flush(
- input_stream_, &pulse::StreamSuccessCallback, pa_mainloop_);
- WaitForOperationCompletion(pa_mainloop_, operation);
-
- operation = pa_stream_cork(input_stream_, 1, &pulse::StreamSuccessCallback,
- pa_mainloop_);
- WaitForOperationCompletion(pa_mainloop_, operation);
-
- // Flush the stream prior to cork, doing so after will cause hangs. Write
- // callbacks are suspended while inside pa_threaded_mainloop_lock() so this
- // is all thread safe.
- operation = pa_stream_flush(
- output_stream_, &pulse::StreamSuccessCallback, pa_mainloop_);
- WaitForOperationCompletion(pa_mainloop_, operation);
-
- operation = pa_stream_cork(output_stream_, 1, &pulse::StreamSuccessCallback,
- pa_mainloop_);
- WaitForOperationCompletion(pa_mainloop_, operation);
-}
-
-void PulseAudioUnifiedStream::SetVolume(double volume) {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
-
- volume_ = static_cast<float>(volume);
-}
-
-void PulseAudioUnifiedStream::GetVolume(double* volume) {
- DCHECK(manager_->GetMessageLoop()->BelongsToCurrentThread());
-
- *volume = volume_;
-}
-
-} // namespace media
diff --git a/chromium/media/audio/pulse/pulse_unified.h b/chromium/media/audio/pulse/pulse_unified.h
deleted file mode 100644
index a800d099a10..00000000000
--- a/chromium/media/audio/pulse/pulse_unified.h
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_PULSE_PULSE_UNIFIED_H_
-#define MEDIA_AUDIO_PULSE_PULSE_UNIFIED_H_
-
-#include <pulse/pulseaudio.h>
-#include <string>
-
-#include "base/memory/scoped_ptr.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_parameters.h"
-#include "media/base/audio_fifo.h"
-
-namespace media {
-
-class AudioManagerBase;
-class SeekableBuffer;
-
-class PulseAudioUnifiedStream : public AudioOutputStream {
- public:
- PulseAudioUnifiedStream(const AudioParameters& params,
- const std::string& input_device_id,
- AudioManagerBase* manager);
-
- virtual ~PulseAudioUnifiedStream();
-
- // Implementation of PulseAudioUnifiedStream.
- virtual bool Open() OVERRIDE;
- virtual void Close() OVERRIDE;
- virtual void Start(AudioSourceCallback* callback) OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void SetVolume(double volume) OVERRIDE;
- virtual void GetVolume(double* volume) OVERRIDE;
-
- private:
- // Called by PulseAudio when |pa_stream_| change state. If an unexpected
- // failure state change happens and |source_callback_| is set
- // this method will forward the error via OnError().
- static void StreamNotifyCallback(pa_stream* s, void* user_data);
-
- // Called by PulseAudio recording stream when it has data.
- static void ReadCallback(pa_stream* s, size_t length, void* user_data);
-
- // Helpers for ReadCallback() to read and write data.
- void WriteData(size_t requested_bytes);
- void ReadData();
-
- // Close() helper function to free internal structs.
- void Reset();
-
- // AudioParameters from the constructor.
- const AudioParameters params_;
-
- // Device unique ID of the input device.
- const std::string input_device_id_;
-
- // Audio manager that created us. Used to report that we've closed.
- AudioManagerBase* manager_;
-
- // PulseAudio API structs.
- pa_context* pa_context_;
- pa_threaded_mainloop* pa_mainloop_;
- pa_stream* input_stream_;
- pa_stream* output_stream_;
-
- // Float representation of volume from 0.0 to 1.0.
- float volume_;
-
- // Callback to audio data source. Must only be modified while holding a lock
- // on |pa_mainloop_| via pa_threaded_mainloop_lock().
- AudioSourceCallback* source_callback_;
-
- scoped_ptr<AudioBus> input_bus_;
- scoped_ptr<AudioBus> output_bus_;
-
- // Used for input to output buffering.
- scoped_ptr<media::SeekableBuffer> fifo_;
-
- // Temporary storage for recorded data. It gets a packet of data from
- // |fifo_| and delivers the data to the OnMoreIOData() callback.
- scoped_ptr<uint8[]> input_data_buffer_;
-
- DISALLOW_COPY_AND_ASSIGN(PulseAudioUnifiedStream);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_PULSE_PULSE_UNIFIED_H_
diff --git a/chromium/media/audio/pulse/pulse_util.cc b/chromium/media/audio/pulse/pulse_util.cc
index 96831cfabe3..66f52c2c700 100644
--- a/chromium/media/audio/pulse/pulse_util.cc
+++ b/chromium/media/audio/pulse/pulse_util.cc
@@ -41,8 +41,6 @@ pa_channel_position ChromiumToPAChannelPosition(Channels channel) {
return PA_CHANNEL_POSITION_SIDE_LEFT;
case SIDE_RIGHT:
return PA_CHANNEL_POSITION_SIDE_RIGHT;
- case CHANNELS_MAX:
- return PA_CHANNEL_POSITION_INVALID;
default:
NOTREACHED() << "Invalid channel: " << channel;
return PA_CHANNEL_POSITION_INVALID;
@@ -86,7 +84,7 @@ pa_channel_map ChannelLayoutToPAChannelMap(ChannelLayout channel_layout) {
pa_channel_map_init(&channel_map);
channel_map.channels = ChannelLayoutToChannelCount(channel_layout);
- for (Channels ch = LEFT; ch < CHANNELS_MAX;
+ for (Channels ch = LEFT; ch <= CHANNELS_MAX;
ch = static_cast<Channels>(ch + 1)) {
int channel_index = ChannelOrder(channel_layout, ch);
if (channel_index < 0)
@@ -205,6 +203,7 @@ bool CreateOutputStream(pa_threaded_mainloop** mainloop,
pa_context** context,
pa_stream** stream,
const AudioParameters& params,
+ const std::string& device_id,
pa_stream_notify_cb_t stream_callback,
pa_stream_request_cb_t write_callback,
void* user_data) {
@@ -287,12 +286,16 @@ bool CreateOutputStream(pa_threaded_mainloop** mainloop,
// and error.
RETURN_ON_FAILURE(
pa_stream_connect_playback(
- *stream, NULL, &pa_buffer_attributes,
+ *stream,
+ device_id == AudioManagerBase::kDefaultDeviceId ?
+ NULL : device_id.c_str(),
+ &pa_buffer_attributes,
static_cast<pa_stream_flags_t>(
PA_STREAM_INTERPOLATE_TIMING | PA_STREAM_ADJUST_LATENCY |
PA_STREAM_AUTO_TIMING_UPDATE | PA_STREAM_NOT_MONOTONIC |
PA_STREAM_START_CORKED),
- NULL, NULL) == 0,
+ NULL,
+ NULL) == 0,
"pa_stream_connect_playback FAILED ");
// Wait for the stream to be ready.
diff --git a/chromium/media/audio/pulse/pulse_util.h b/chromium/media/audio/pulse/pulse_util.h
index da0cb0f42d7..791d6ade83a 100644
--- a/chromium/media/audio/pulse/pulse_util.h
+++ b/chromium/media/audio/pulse/pulse_util.h
@@ -69,6 +69,7 @@ bool CreateOutputStream(pa_threaded_mainloop** mainloop,
pa_context** context,
pa_stream** stream,
const AudioParameters& params,
+ const std::string& device_id,
pa_stream_notify_cb_t stream_callback,
pa_stream_request_cb_t write_callback,
void* user_data);
diff --git a/chromium/media/audio/sample_rates.cc b/chromium/media/audio/sample_rates.cc
index a082a938ab8..7fa62a79ed1 100644
--- a/chromium/media/audio/sample_rates.cc
+++ b/chromium/media/audio/sample_rates.cc
@@ -4,23 +4,48 @@
#include "media/audio/sample_rates.h"
+#include "base/logging.h"
+
namespace media {
-AudioSampleRate AsAudioSampleRate(int sample_rate) {
+bool ToAudioSampleRate(int sample_rate, AudioSampleRate* asr) {
+ DCHECK(asr);
switch (sample_rate) {
- case 8000: return k8000Hz;
- case 16000: return k16000Hz;
- case 32000: return k32000Hz;
- case 48000: return k48000Hz;
- case 96000: return k96000Hz;
- case 11025: return k11025Hz;
- case 22050: return k22050Hz;
- case 44100: return k44100Hz;
- case 88200: return k88200Hz;
- case 176400: return k176400Hz;
- case 192000: return k192000Hz;
+ case 8000:
+ *asr = k8000Hz;
+ return true;
+ case 16000:
+ *asr = k16000Hz;
+ return true;
+ case 32000:
+ *asr = k32000Hz;
+ return true;
+ case 48000:
+ *asr = k48000Hz;
+ return true;
+ case 96000:
+ *asr = k96000Hz;
+ return true;
+ case 11025:
+ *asr = k11025Hz;
+ return true;
+ case 22050:
+ *asr = k22050Hz;
+ return true;
+ case 44100:
+ *asr = k44100Hz;
+ return true;
+ case 88200:
+ *asr = k88200Hz;
+ return true;
+ case 176400:
+ *asr = k176400Hz;
+ return true;
+ case 192000:
+ *asr = k192000Hz;
+ return true;
}
- return kUnexpectedAudioSampleRate;
+ return false;
}
} // namespace media
diff --git a/chromium/media/audio/sample_rates.h b/chromium/media/audio/sample_rates.h
index 7c29e548b34..482ec0fdc8b 100644
--- a/chromium/media/audio/sample_rates.h
+++ b/chromium/media/audio/sample_rates.h
@@ -23,12 +23,13 @@ enum AudioSampleRate {
k88200Hz = 8,
k176400Hz = 9,
k192000Hz = 10,
- kUnexpectedAudioSampleRate // Must always be last!
+ // Must always equal the largest value ever reported:
+ kAudioSampleRateMax = k192000Hz,
};
// Helper method to convert integral values to their respective enum values,
-// or kUnexpectedAudioSampleRate if no match exists.
-MEDIA_EXPORT AudioSampleRate AsAudioSampleRate(int sample_rate);
+// returning false for unexpected sample rates.
+MEDIA_EXPORT bool ToAudioSampleRate(int sample_rate, AudioSampleRate* asr);
} // namespace media
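Callers of the old AsAudioSampleRate() sentinel now branch on the boolean instead. A small usage sketch under the new signature (the surrounding function is hypothetical):

#include "media/audio/sample_rates.h"

// Returns true when |sample_rate| is one of the recognized rates; unknown
// rates simply fall through.
bool ReportSampleRate(int sample_rate) {
  media::AudioSampleRate asr;
  if (!media::ToAudioSampleRate(sample_rate, &asr))
    return false;  // e.g. 44099 Hz: previously kUnexpectedAudioSampleRate.
  // |asr| is in [k8000Hz, kAudioSampleRateMax] and safe to use as a
  // histogram bucket or switch key.
  return true;
}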
diff --git a/chromium/media/audio/scoped_loop_observer.h b/chromium/media/audio/scoped_loop_observer.h
deleted file mode 100644
index 7aaab542225..00000000000
--- a/chromium/media/audio/scoped_loop_observer.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_SCOPED_LOOP_OBSERVER_H_
-#define MEDIA_AUDIO_SCOPED_LOOP_OBSERVER_H_
-
-#include "base/memory/ref_counted.h"
-#include "base/message_loop/message_loop.h"
-#include "base/message_loop/message_loop_proxy.h"
-
-namespace base {
-class WaitableEvent;
-}
-
-namespace media {
-
-// A common base class for AudioOutputDevice and AudioInputDevice that manages
-// a message loop pointer and monitors it for destruction. If the object goes
-// out of scope before the message loop, the object will automatically remove
-// itself from the message loop's list of destruction observers.
-// NOTE: The class that inherits from this class must implement the
-// WillDestroyCurrentMessageLoop virtual method from DestructionObserver.
-class ScopedLoopObserver
- : public base::MessageLoop::DestructionObserver {
- public:
- explicit ScopedLoopObserver(
- const scoped_refptr<base::MessageLoopProxy>& message_loop);
-
- protected:
- virtual ~ScopedLoopObserver();
-
- // Accessor to the loop that's used by the derived class.
- const scoped_refptr<base::MessageLoopProxy>& message_loop() { return loop_; }
-
- private:
- // Call to add or remove ourselves from the list of destruction observers for
- // the message loop.
- void ObserveLoopDestruction(bool enable, base::WaitableEvent* done);
-
- // A pointer to the message loop's proxy. In case the loop gets destroyed
- // before this object goes out of scope, PostTask etc will fail but not crash.
- scoped_refptr<base::MessageLoopProxy> loop_;
-
- DISALLOW_COPY_AND_ASSIGN(ScopedLoopObserver);
-};
-
-} // namespace media.
-
-#endif // MEDIA_AUDIO_SCOPED_LOOP_OBSERVER_H_
diff --git a/chromium/media/audio/scoped_loop_observer.cc b/chromium/media/audio/scoped_task_runner_observer.cc
index 01187ec8f99..9f4eac28511 100644
--- a/chromium/media/audio/scoped_loop_observer.cc
+++ b/chromium/media/audio/scoped_task_runner_observer.cc
@@ -2,27 +2,28 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "media/audio/scoped_loop_observer.h"
+#include "media/audio/scoped_task_runner_observer.h"
#include "base/bind.h"
#include "base/synchronization/waitable_event.h"
namespace media {
-ScopedLoopObserver::ScopedLoopObserver(
- const scoped_refptr<base::MessageLoopProxy>& loop)
- : loop_(loop) {
+ScopedTaskRunnerObserver::ScopedTaskRunnerObserver(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner)
+ : task_runner_(task_runner) {
ObserveLoopDestruction(true, NULL);
}
-ScopedLoopObserver::~ScopedLoopObserver() {
+ScopedTaskRunnerObserver::~ScopedTaskRunnerObserver() {
ObserveLoopDestruction(false, NULL);
}
-void ScopedLoopObserver::ObserveLoopDestruction(bool enable,
- base::WaitableEvent* done) {
+void ScopedTaskRunnerObserver::ObserveLoopDestruction(
+ bool enable,
+ base::WaitableEvent* done) {
// Note: |done| may be NULL.
- if (loop_->BelongsToCurrentThread()) {
+ if (task_runner_->BelongsToCurrentThread()) {
base::MessageLoop* loop = base::MessageLoop::current();
if (enable) {
loop->AddDestructionObserver(this);
@@ -31,8 +32,8 @@ void ScopedLoopObserver::ObserveLoopDestruction(bool enable,
}
} else {
base::WaitableEvent event(false, false);
- if (loop_->PostTask(FROM_HERE,
- base::Bind(&ScopedLoopObserver::ObserveLoopDestruction,
+ if (task_runner_->PostTask(FROM_HERE,
+ base::Bind(&ScopedTaskRunnerObserver::ObserveLoopDestruction,
base::Unretained(this), enable, &event))) {
event.Wait();
} else {
diff --git a/chromium/media/audio/scoped_task_runner_observer.h b/chromium/media/audio/scoped_task_runner_observer.h
new file mode 100644
index 00000000000..ce9adf96713
--- /dev/null
+++ b/chromium/media/audio/scoped_task_runner_observer.h
@@ -0,0 +1,52 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_SCOPED_TASK_RUNNER_OBSERVER_H_
+#define MEDIA_AUDIO_SCOPED_TASK_RUNNER_OBSERVER_H_
+
+#include "base/memory/ref_counted.h"
+#include "base/message_loop/message_loop.h"
+
+namespace base {
+class SingleThreadTaskRunner;
+class WaitableEvent;
+}
+
+namespace media {
+
+// A common base class for AudioOutputDevice and AudioInputDevice that manages
+// a task runner and monitors it for destruction. If the object goes out of
+// scope before the task runner, the object will automatically remove itself
+// from the task runner's list of destruction observers.
+// NOTE: The class that inherits from this class must implement the
+// WillDestroyCurrentMessageLoop virtual method from DestructionObserver.
+class ScopedTaskRunnerObserver
+ : public base::MessageLoop::DestructionObserver {
+ public:
+ explicit ScopedTaskRunnerObserver(
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner);
+
+ protected:
+ virtual ~ScopedTaskRunnerObserver();
+
+ // Accessor to the task runner that's used by the derived class.
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner() {
+ return task_runner_;
+ }
+
+ private:
+ // Call to add or remove ourselves from the list of destruction observers for
+ // the message loop.
+ void ObserveLoopDestruction(bool enable, base::WaitableEvent* done);
+
+ // A pointer to the task runner. In case it gets destroyed before this object
+ // goes out of scope, PostTask() etc will fail but not crash.
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedTaskRunnerObserver);
+};
+
+} // namespace media.
+
+#endif // MEDIA_AUDIO_SCOPED_TASK_RUNNER_OBSERVER_H_
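As the header comment notes, a subclass must still provide WillDestroyCurrentMessageLoop(). A minimal hypothetical subclass showing the expected shape (the class name and body are illustrative, not from the patch):

#include "base/compiler_specific.h"
#include "base/single_thread_task_runner.h"
#include "media/audio/scoped_task_runner_observer.h"

class ExampleAudioDevice : public media::ScopedTaskRunnerObserver {
 public:
  explicit ExampleAudioDevice(
      const scoped_refptr<base::SingleThreadTaskRunner>& task_runner)
      : ScopedTaskRunnerObserver(task_runner) {}

 protected:
  virtual ~ExampleAudioDevice() {}

  // base::MessageLoop::DestructionObserver implementation.
  virtual void WillDestroyCurrentMessageLoop() OVERRIDE {
    // The audio thread is shutting down; drop any pending work here.
  }
};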
diff --git a/chromium/media/audio/simple_sources.cc b/chromium/media/audio/simple_sources.cc
index 275413a232c..039029e5388 100644
--- a/chromium/media/audio/simple_sources.cc
+++ b/chromium/media/audio/simple_sources.cc
@@ -48,12 +48,6 @@ int SineWaveAudioSource::OnMoreData(AudioBus* audio_bus,
return max_frames;
}
-int SineWaveAudioSource::OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState audio_buffers) {
- return OnMoreData(dest, audio_buffers);
-}
-
void SineWaveAudioSource::OnError(AudioOutputStream* stream) {
errors_++;
}
diff --git a/chromium/media/audio/simple_sources.h b/chromium/media/audio/simple_sources.h
index 449f875b5d6..6303386ead1 100644
--- a/chromium/media/audio/simple_sources.h
+++ b/chromium/media/audio/simple_sources.h
@@ -29,9 +29,6 @@ class MEDIA_EXPORT SineWaveAudioSource
// Implementation of AudioSourceCallback.
virtual int OnMoreData(AudioBus* audio_bus,
AudioBuffersState audio_buffers) OVERRIDE;
- virtual int OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState audio_buffers) OVERRIDE;
virtual void OnError(AudioOutputStream* stream) OVERRIDE;
// The number of OnMoreData() and OnError() calls respectively.
diff --git a/chromium/media/audio/sounds/audio_stream_handler.cc b/chromium/media/audio/sounds/audio_stream_handler.cc
index 08608ac4187..645fcb366a3 100644
--- a/chromium/media/audio/sounds/audio_stream_handler.cc
+++ b/chromium/media/audio/sounds/audio_stream_handler.cc
@@ -6,8 +6,11 @@
#include <string>
+#include "base/cancelable_callback.h"
#include "base/logging.h"
-#include "base/message_loop/message_loop_proxy.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/lock.h"
+#include "base/time/time.h"
#include "media/audio/audio_manager.h"
#include "media/audio/audio_manager_base.h"
#include "media/base/channel_layout.h"
@@ -22,6 +25,9 @@ const double kOutputVolumePercent = 0.8;
// The number of frames each OnMoreData() call will request.
const int kDefaultFrameCount = 1024;
+// Keep alive timeout for audio stream.
+const int kKeepAliveMs = 1500;
+
AudioStreamHandler::TestObserver* g_observer_for_testing = NULL;
AudioOutputStream::AudioSourceCallback* g_audio_source_for_testing = NULL;
@@ -30,36 +36,53 @@ AudioOutputStream::AudioSourceCallback* g_audio_source_for_testing = NULL;
class AudioStreamHandler::AudioStreamContainer
: public AudioOutputStream::AudioSourceCallback {
public:
- AudioStreamContainer(const WavAudioHandler& wav_audio,
- const AudioParameters& params)
- : stream_(NULL),
- wav_audio_(wav_audio),
- params_(params),
- cursor_(0) {
- }
+ AudioStreamContainer(const WavAudioHandler& wav_audio)
+ : started_(false),
+ stream_(NULL),
+ cursor_(0),
+ delayed_stop_posted_(false),
+ wav_audio_(wav_audio) {}
virtual ~AudioStreamContainer() {
- DCHECK(AudioManager::Get()->GetMessageLoop()->BelongsToCurrentThread());
+ DCHECK(AudioManager::Get()->GetTaskRunner()->BelongsToCurrentThread());
}
void Play() {
- DCHECK(AudioManager::Get()->GetMessageLoop()->BelongsToCurrentThread());
+ DCHECK(AudioManager::Get()->GetTaskRunner()->BelongsToCurrentThread());
if (!stream_) {
- stream_ = AudioManager::Get()->MakeAudioOutputStreamProxy(params_,
- std::string(),
+ const AudioParameters& p = wav_audio_.params();
+ const AudioParameters params(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ p.channel_layout(),
+ p.sample_rate(),
+ p.bits_per_sample(),
+ kDefaultFrameCount);
+ stream_ = AudioManager::Get()->MakeAudioOutputStreamProxy(params,
std::string());
if (!stream_ || !stream_->Open()) {
LOG(ERROR) << "Failed to open an output stream.";
return;
}
stream_->SetVolume(kOutputVolumePercent);
- } else {
- // TODO (ygorshenin@): implement smart stream rewind.
- stream_->Stop();
}
- cursor_ = 0;
+ {
+ base::AutoLock al(state_lock_);
+
+ delayed_stop_posted_ = false;
+ stop_closure_.Reset(base::Bind(&AudioStreamContainer::StopStream,
+ base::Unretained(this)));
+
+ if (started_) {
+ if (wav_audio_.AtEnd(cursor_))
+ cursor_ = 0;
+ return;
+ }
+
+ cursor_ = 0;
+ }
+
+ started_ = true;
if (g_audio_source_for_testing)
stream_->Start(g_audio_source_for_testing);
else
@@ -70,15 +93,12 @@ class AudioStreamHandler::AudioStreamContainer
}
void Stop() {
- DCHECK(AudioManager::Get()->GetMessageLoop()->BelongsToCurrentThread());
- if (!stream_)
- return;
- stream_->Stop();
- stream_->Close();
+ DCHECK(AudioManager::Get()->GetTaskRunner()->BelongsToCurrentThread());
+ StopStream();
+ if (stream_)
+ stream_->Close();
stream_ = NULL;
-
- if (g_observer_for_testing)
- g_observer_for_testing->OnStop(cursor_);
+ stop_closure_.Cancel();
}
private:
@@ -86,35 +106,51 @@ class AudioStreamHandler::AudioStreamContainer
// Following methods could be called from *ANY* thread.
virtual int OnMoreData(AudioBus* dest,
AudioBuffersState /* state */) OVERRIDE {
+ base::AutoLock al(state_lock_);
size_t bytes_written = 0;
+
if (wav_audio_.AtEnd(cursor_) ||
!wav_audio_.CopyTo(dest, cursor_, &bytes_written)) {
- AudioManager::Get()->GetMessageLoop()->PostTask(
+ if (delayed_stop_posted_)
+ return 0;
+ delayed_stop_posted_ = true;
+ AudioManager::Get()->GetTaskRunner()->PostDelayedTask(
FROM_HERE,
- base::Bind(&AudioStreamContainer::Stop, base::Unretained(this)));
+ stop_closure_.callback(),
+ base::TimeDelta::FromMilliseconds(kKeepAliveMs));
return 0;
}
cursor_ += bytes_written;
-
return dest->frames();
}
- virtual int OnMoreIOData(AudioBus* /* source */,
- AudioBus* dest,
- AudioBuffersState state) OVERRIDE {
- return OnMoreData(dest, state);
- }
-
virtual void OnError(AudioOutputStream* /* stream */) OVERRIDE {
LOG(ERROR) << "Error during system sound reproduction.";
}
- AudioOutputStream* stream_;
+ void StopStream() {
+ DCHECK(AudioManager::Get()->GetTaskRunner()->BelongsToCurrentThread());
- const WavAudioHandler wav_audio_;
- const AudioParameters params_;
+ if (stream_ && started_) {
+ // Do not hold the |state_lock_| while stopping the output stream.
+ stream_->Stop();
+ if (g_observer_for_testing)
+ g_observer_for_testing->OnStop(cursor_);
+ }
+
+ started_ = false;
+ }
+ // Must only be accessed on the AudioManager::GetTaskRunner() thread.
+ bool started_;
+ AudioOutputStream* stream_;
+
+ // All variables below must be accessed under |state_lock_| when |started_|.
+ base::Lock state_lock_;
size_t cursor_;
+ bool delayed_stop_posted_;
+ const WavAudioHandler wav_audio_;
+ base::CancelableClosure stop_closure_;
DISALLOW_COPY_AND_ASSIGN(AudioStreamContainer);
};
@@ -127,26 +163,21 @@ AudioStreamHandler::AudioStreamHandler(const base::StringPiece& wav_data)
LOG(ERROR) << "Can't get access to audio manager.";
return;
}
- AudioParameters params(AudioParameters::AUDIO_PCM_LOW_LATENCY,
- GuessChannelLayout(wav_audio_.num_channels()),
- wav_audio_.sample_rate(),
- wav_audio_.bits_per_sample(),
- kDefaultFrameCount);
- if (!params.IsValid()) {
+ if (!wav_audio_.params().IsValid()) {
LOG(ERROR) << "Audio params are invalid.";
return;
}
- stream_.reset(new AudioStreamContainer(wav_audio_, params));
+ stream_.reset(new AudioStreamContainer(wav_audio_));
initialized_ = true;
}
AudioStreamHandler::~AudioStreamHandler() {
DCHECK(CalledOnValidThread());
- AudioManager::Get()->GetMessageLoop()->PostTask(
+ AudioManager::Get()->GetTaskRunner()->PostTask(
FROM_HERE,
base::Bind(&AudioStreamContainer::Stop, base::Unretained(stream_.get())));
- AudioManager::Get()->GetMessageLoop()->DeleteSoon(FROM_HERE,
- stream_.release());
+ AudioManager::Get()->GetTaskRunner()->DeleteSoon(FROM_HERE,
+ stream_.release());
}
bool AudioStreamHandler::IsInitialized() const {
@@ -160,7 +191,7 @@ bool AudioStreamHandler::Play() {
if (!IsInitialized())
return false;
- AudioManager::Get()->GetMessageLoop()->PostTask(
+ AudioManager::Get()->GetTaskRunner()->PostTask(
FROM_HERE,
base::Bind(base::IgnoreResult(&AudioStreamContainer::Play),
base::Unretained(stream_.get())));
@@ -169,7 +200,7 @@ bool AudioStreamHandler::Play() {
void AudioStreamHandler::Stop() {
DCHECK(CalledOnValidThread());
- AudioManager::Get()->GetMessageLoop()->PostTask(
+ AudioManager::Get()->GetTaskRunner()->PostTask(
FROM_HERE,
base::Bind(&AudioStreamContainer::Stop, base::Unretained(stream_.get())));
}
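
The rewritten AudioStreamContainer above replaces the immediate stop with a delayed, cancelable one: when the WAV data runs out, StopStream() is posted kKeepAliveMs (1500 ms) later through a base::CancelableClosure, and a Play() that arrives within that window just rewinds the cursor and cancels the pending stop, so the output stream stays open. A minimal standalone sketch of that keep-alive pattern, with std::thread and a generation counter standing in for the audio task runner and CancelableClosure (both substitutions are mine, not Chromium code):

// Sketch of the delayed, cancelable stop: a replay within the grace period
// invalidates the scheduled stop instead of closing the stream.
#include <chrono>
#include <iostream>
#include <mutex>
#include <thread>

class KeepAliveStream {
 public:
  // Called when playback is (re)requested: cancels any pending delayed stop.
  void Play() {
    std::lock_guard<std::mutex> lock(mutex_);
    ++generation_;            // Invalidates previously scheduled stops.
    stop_pending_ = false;
    started_ = true;
    std::cout << "playing (stream stays open)\n";
  }

  // Called when the data source is exhausted: schedule a stop in 1500 ms.
  void OnSourceExhausted() {
    std::lock_guard<std::mutex> lock(mutex_);
    if (stop_pending_ || !started_)
      return;
    stop_pending_ = true;
    const int my_generation = generation_;
    std::thread([this, my_generation] {
      std::this_thread::sleep_for(std::chrono::milliseconds(1500));
      std::lock_guard<std::mutex> lock(mutex_);
      if (generation_ != my_generation)
        return;               // A newer Play() cancelled this stop.
      started_ = false;
      std::cout << "stream stopped after keep-alive timeout\n";
    }).detach();
  }

 private:
  std::mutex mutex_;
  int generation_ = 0;
  bool started_ = false;
  bool stop_pending_ = false;
};

int main() {
  KeepAliveStream stream;
  stream.Play();
  stream.OnSourceExhausted();   // Schedules a delayed stop.
  std::this_thread::sleep_for(std::chrono::milliseconds(500));
  stream.Play();                // Arrives within the window: stop is cancelled.
  std::this_thread::sleep_for(std::chrono::seconds(2));
  return 0;
}

The grace period is what lets consecutive Play() calls reuse a single open output stream instead of closing and reopening the audio device each time.
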
diff --git a/chromium/media/audio/sounds/audio_stream_handler.h b/chromium/media/audio/sounds/audio_stream_handler.h
index 7c63a24f034..f814aaef599 100644
--- a/chromium/media/audio/sounds/audio_stream_handler.h
+++ b/chromium/media/audio/sounds/audio_stream_handler.h
@@ -42,10 +42,12 @@ class MEDIA_EXPORT AudioStreamHandler : public base::NonThreadSafe {
// Returns true iff AudioStreamHandler is correctly initialized;
bool IsInitialized() const;
- // Stops any previous playback if it's still not completed and
- // starts new playback. Volume level will be set according to
- // current settings and won't be changed during playback. Returns
- // true iff new playback was successfully started.
+ // Plays sound. Volume level will be set according to current settings
+ // and won't be changed during playback. Returns true iff new playback
+ // was successfully started.
+ //
+ // NOTE: if the current playback hasn't reached the end of the stream,
+ // the new playback request is dropped, but true is still returned.
bool Play();
// Stops current playback.
diff --git a/chromium/media/audio/sounds/audio_stream_handler_unittest.cc b/chromium/media/audio/sounds/audio_stream_handler_unittest.cc
index 50bc301c38a..acf472a0e48 100644
--- a/chromium/media/audio/sounds/audio_stream_handler_unittest.cc
+++ b/chromium/media/audio/sounds/audio_stream_handler_unittest.cc
@@ -74,7 +74,7 @@ TEST_F(AudioStreamHandlerTest, Play) {
ASSERT_EQ(4, observer.cursor());
}
-TEST_F(AudioStreamHandlerTest, Rewind) {
+TEST_F(AudioStreamHandlerTest, ConsecutivePlayRequests) {
base::RunLoop run_loop;
TestObserver observer(run_loop.QuitClosure());
SineWaveAudioSource source(CHANNEL_LAYOUT_STEREO, 200.0, 8000);
@@ -89,19 +89,19 @@ TEST_F(AudioStreamHandlerTest, Rewind) {
FROM_HERE,
base::Bind(base::IgnoreResult(&AudioStreamHandler::Play),
base::Unretained(audio_stream_handler())),
- base::TimeDelta::FromSeconds(3));
+ base::TimeDelta::FromSeconds(1));
base::MessageLoop::current()->PostDelayedTask(
FROM_HERE,
base::Bind(&AudioStreamHandler::Stop,
base::Unretained(audio_stream_handler())),
- base::TimeDelta::FromSeconds(6));
+ base::TimeDelta::FromSeconds(2));
run_loop.Run();
SetObserverForTesting(NULL);
SetAudioSourceForTesting(NULL);
- ASSERT_EQ(2, observer.num_play_requests());
+ ASSERT_EQ(1, observer.num_play_requests());
ASSERT_EQ(1, observer.num_stop_requests());
}
diff --git a/chromium/media/audio/sounds/sounds_manager.cc b/chromium/media/audio/sounds/sounds_manager.cc
index e93dc6588dd..e80843685cc 100644
--- a/chromium/media/audio/sounds/sounds_manager.cc
+++ b/chromium/media/audio/sounds/sounds_manager.cc
@@ -4,21 +4,20 @@
#include "media/audio/sounds/sounds_manager.h"
-#include "base/command_line.h"
#include "base/compiler_specific.h"
#include "base/logging.h"
#include "base/memory/linked_ptr.h"
#include "base/memory/ref_counted.h"
-#include "base/message_loop/message_loop_proxy.h"
+#include "base/single_thread_task_runner.h"
#include "media/audio/audio_manager.h"
#include "media/audio/sounds/audio_stream_handler.h"
-#include "media/base/media_switches.h"
namespace media {
namespace {
SoundsManager* g_instance = NULL;
+bool g_initialized_for_testing = false;
// SoundsManagerImpl ---------------------------------------------------
@@ -35,13 +34,14 @@ class SoundsManagerImpl : public SoundsManager {
private:
base::hash_map<SoundKey, linked_ptr<AudioStreamHandler> > handlers_;
- scoped_refptr<base::MessageLoopProxy> message_loop_;
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
DISALLOW_COPY_AND_ASSIGN(SoundsManagerImpl);
};
SoundsManagerImpl::SoundsManagerImpl()
- : message_loop_(AudioManager::Get()->GetMessageLoop()) {}
+ : task_runner_(AudioManager::Get()->GetTaskRunner()) {
+}
SoundsManagerImpl::~SoundsManagerImpl() { DCHECK(CalledOnValidThread()); }
@@ -74,46 +74,7 @@ base::TimeDelta SoundsManagerImpl::GetDuration(SoundKey key) {
return base::TimeDelta();
}
const WavAudioHandler& wav_audio = handlers_[key]->wav_audio_handler();
- const int64 size = wav_audio.size();
- const int64 rate = wav_audio.byte_rate();
- return base::TimeDelta::FromMicroseconds(size * 1000000 / rate);
-}
-
-// SoundsManagerStub ---------------------------------------------------
-
-class SoundsManagerStub : public SoundsManager {
- public:
- SoundsManagerStub();
- virtual ~SoundsManagerStub();
-
- // SoundsManager implementation:
- virtual bool Initialize(SoundKey key,
- const base::StringPiece& data) OVERRIDE;
- virtual bool Play(SoundKey key) OVERRIDE;
- virtual base::TimeDelta GetDuration(SoundKey key) OVERRIDE;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(SoundsManagerStub);
-};
-
-SoundsManagerStub::SoundsManagerStub() {}
-
-SoundsManagerStub::~SoundsManagerStub() { DCHECK(CalledOnValidThread()); }
-
-bool SoundsManagerStub::Initialize(SoundKey /* key */,
- const base::StringPiece& /* data */) {
- DCHECK(CalledOnValidThread());
- return false;
-}
-
-bool SoundsManagerStub::Play(SoundKey /* key */) {
- DCHECK(CalledOnValidThread());
- return false;
-}
-
-base::TimeDelta SoundsManagerStub::GetDuration(SoundKey /* key */) {
- DCHECK(CalledOnValidThread());
- return base::TimeDelta();
+ return wav_audio.params().GetBufferDuration();
}
} // namespace
@@ -124,13 +85,11 @@ SoundsManager::~SoundsManager() { DCHECK(CalledOnValidThread()); }
// static
void SoundsManager::Create() {
- CHECK(!g_instance) << "SoundsManager::Create() is called twice";
- const bool enabled = !CommandLine::ForCurrentProcess()->HasSwitch(
- ::switches::kDisableSystemSoundsManager);
- if (enabled)
- g_instance = new SoundsManagerImpl();
- else
- g_instance = new SoundsManagerStub();
+ CHECK(!g_instance || g_initialized_for_testing)
+ << "SoundsManager::Create() is called twice";
+ if (g_initialized_for_testing)
+ return;
+ g_instance = new SoundsManagerImpl();
}
// static
@@ -147,4 +106,12 @@ SoundsManager* SoundsManager::Get() {
return g_instance;
}
+// static
+void SoundsManager::InitializeForTesting(SoundsManager* manager) {
+ CHECK(!g_instance) << "SoundsManager is already initialized.";
+ CHECK(manager);
+ g_instance = manager;
+ g_initialized_for_testing = true;
+}
+
} // namespace media
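
SoundsManager::Create() now tolerates a test-installed instance instead of unconditionally allocating SoundsManagerImpl. Roughly, the injection pattern looks like the sketch below; the Service/FakeService names are invented for illustration and are not part of the media code:

// Test-injection pattern: production code calls Create() unconditionally, but
// a test may have installed its own instance first, making Create() a no-op.
#include <cassert>
#include <cstdio>

class Service {
 public:
  virtual ~Service() {}
  virtual const char* Name() const { return "real service"; }

  static void Create() {
    assert(!instance_ || initialized_for_testing_);
    if (initialized_for_testing_)
      return;                       // A test already owns the instance.
    instance_ = new Service();
  }

  static void InitializeForTesting(Service* test_instance) {
    assert(!instance_ && test_instance);
    instance_ = test_instance;      // Ownership passes to the singleton.
    initialized_for_testing_ = true;
  }

  static Service* Get() { return instance_; }

 private:
  static Service* instance_;
  static bool initialized_for_testing_;
};

Service* Service::instance_ = nullptr;
bool Service::initialized_for_testing_ = false;

struct FakeService : Service {
  const char* Name() const override { return "fake service"; }
};

int main() {
  Service::InitializeForTesting(new FakeService());  // What a test would do.
  Service::Create();                                 // Production path: no-op.
  std::printf("%s\n", Service::Get()->Name());       // Prints "fake service".
  delete Service::Get();
  return 0;
}
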
diff --git a/chromium/media/audio/sounds/sounds_manager.h b/chromium/media/audio/sounds/sounds_manager.h
index 7ff6aafffdc..71184da3522 100644
--- a/chromium/media/audio/sounds/sounds_manager.h
+++ b/chromium/media/audio/sounds/sounds_manager.h
@@ -29,6 +29,10 @@ class MEDIA_EXPORT SoundsManager : public base::NonThreadSafe {
// Returns a pointer to a singleton instance of the SoundsManager.
static SoundsManager* Get();
+ // Initializes sounds manager for testing. The |manager| will be owned
+ // by the internal pointer and will be deleted by Shutdown().
+ static void InitializeForTesting(SoundsManager* manager);
+
// Initializes SoundsManager with the wav data for the system
// sounds. Returns true if SoundsManager was successfully
// initialized.
diff --git a/chromium/media/audio/sounds/sounds_manager_unittest.cc b/chromium/media/audio/sounds/sounds_manager_unittest.cc
index 5aa3694e838..78f564ec8b9 100644
--- a/chromium/media/audio/sounds/sounds_manager_unittest.cc
+++ b/chromium/media/audio/sounds/sounds_manager_unittest.cc
@@ -54,7 +54,7 @@ TEST_F(SoundsManagerTest, Play) {
ASSERT_TRUE(SoundsManager::Get()->Initialize(
kTestAudioKey,
base::StringPiece(kTestAudioData, arraysize(kTestAudioData))));
- ASSERT_EQ(41,
+ ASSERT_EQ(20,
SoundsManager::Get()->GetDuration(kTestAudioKey).InMicroseconds());
ASSERT_TRUE(SoundsManager::Get()->Play(kTestAudioKey));
run_loop.Run();
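
The expected duration changes from 41 to 20 microseconds because GetDuration() no longer divides by the byte rate declared in the test WAV header (96000, per the old wav_audio_handler_unittest expectation) and instead derives the duration from the frame count and sample rate via WavAudioHandler::params(). A quick back-of-the-envelope check, using the test data values that appear elsewhere in this patch (4 bytes of PCM, 2 channels, 16 bits, 48000 Hz):

// Back-of-the-envelope check of the 41 -> 20 microsecond change (values taken
// from the test data used elsewhere in this patch).
#include <cstdint>
#include <cstdio>

int main() {
  const int64_t data_size = 4;        // Bytes of PCM data in the test WAV.
  const int64_t byte_rate = 96000;    // Byte rate declared by the WAV header.
  const int channels = 2, bits_per_sample = 16, sample_rate = 48000;

  // Old path: size * 1e6 / byte_rate, trusting the (inconsistent) header rate.
  std::printf("old: %lld us\n",
              (long long)(data_size * 1000000 / byte_rate));         // 41

  // New path: frames / sample_rate, as AudioParameters::GetBufferDuration()
  // computes it from the derived frame count.
  const int64_t frames = data_size * 8 / channels / bits_per_sample; // 1 frame
  std::printf("new: %lld us\n",
              (long long)(frames * 1000000 / sample_rate));          // 20
  return 0;
}
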
diff --git a/chromium/media/audio/sounds/wav_audio_handler.cc b/chromium/media/audio/sounds/wav_audio_handler.cc
index 20eab8be437..b87baa8fd3f 100644
--- a/chromium/media/audio/sounds/wav_audio_handler.cc
+++ b/chromium/media/audio/sounds/wav_audio_handler.cc
@@ -33,14 +33,14 @@ const size_t kFmtChunkMinimumSize = 16;
const size_t kAudioFormatOffset = 0;
const size_t kChannelOffset = 2;
const size_t kSampleRateOffset = 4;
-const size_t kByteRateOffset = 8;
const size_t kBitsPerSampleOffset = 14;
// Some constants for audio format.
const int kAudioFormatPCM = 1;
// Reads an integer from |data| with |offset|.
-template<typename T> T ReadInt(const base::StringPiece& data, size_t offset) {
+template <typename T>
+T ReadInt(const base::StringPiece& data, size_t offset) {
CHECK_LE(offset + sizeof(T), data.size());
T result;
memcpy(&result, data.data() + offset, sizeof(T));
@@ -57,7 +57,6 @@ namespace media {
WavAudioHandler::WavAudioHandler(const base::StringPiece& wav_data)
: num_channels_(0),
sample_rate_(0),
- byte_rate_(0),
bits_per_sample_(0) {
CHECK_LE(kWavFileHeaderSize, wav_data.size()) << "wav data is too small";
CHECK(wav_data.starts_with(kChunkId) &&
@@ -72,11 +71,17 @@ WavAudioHandler::WavAudioHandler(const base::StringPiece& wav_data)
CHECK_LE(0, length) << "can't parse wav sub-chunk";
offset += length;
}
-}
-WavAudioHandler::~WavAudioHandler() {
+ const int frame_count = data_.size() * 8 / num_channels_ / bits_per_sample_;
+ params_ = AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ GuessChannelLayout(num_channels_),
+ sample_rate_,
+ bits_per_sample_,
+ frame_count);
}
+WavAudioHandler::~WavAudioHandler() {}
+
bool WavAudioHandler::AtEnd(size_t cursor) const {
return data_.size() <= cursor;
}
@@ -86,18 +91,20 @@ bool WavAudioHandler::CopyTo(AudioBus* bus,
size_t* bytes_written) const {
if (!bus)
return false;
- if (bus->channels() != num_channels_) {
- LOG(ERROR) << "Number of channel mismatch.";
+ if (bus->channels() != params_.channels()) {
+ DVLOG(1) << "Number of channel mismatch.";
return false;
}
if (AtEnd(cursor)) {
bus->Zero();
return true;
}
- const int remaining_frames = (data_.size() - cursor) / bytes_per_frame_;
+ const int remaining_frames =
+ (data_.size() - cursor) / params_.GetBytesPerFrame();
const int frames = std::min(bus->frames(), remaining_frames);
- bus->FromInterleaved(data_.data() + cursor, frames, bytes_per_sample_);
- *bytes_written = frames * bytes_per_frame_;
+ bus->FromInterleaved(data_.data() + cursor, frames,
+ params_.bits_per_sample() / 8);
+ *bytes_written = frames * params_.GetBytesPerFrame();
bus->ZeroFramesPartial(frames, bus->frames() - frames);
return true;
}
@@ -113,23 +120,20 @@ int WavAudioHandler::ParseSubChunk(const base::StringPiece& data) {
if (!ParseDataChunk(data.substr(kChunkHeaderSize, chunk_length)))
return -1;
} else {
- LOG(ERROR) << "Unknown data chunk: " << data.substr(0, 4) << ".";
+ DVLOG(1) << "Unknown data chunk: " << data.substr(0, 4) << ".";
}
return chunk_length + kChunkHeaderSize;
}
bool WavAudioHandler::ParseFmtChunk(const base::StringPiece& data) {
if (data.size() < kFmtChunkMinimumSize) {
- LOG(ERROR) << "Data size " << data.size() << " is too short.";
+ DLOG(ERROR) << "Data size " << data.size() << " is too short.";
return false;
}
DCHECK_EQ(ReadInt<uint16>(data, kAudioFormatOffset), kAudioFormatPCM);
num_channels_ = ReadInt<uint16>(data, kChannelOffset);
sample_rate_ = ReadInt<uint32>(data, kSampleRateOffset);
- byte_rate_ = ReadInt<uint32>(data, kByteRateOffset);
bits_per_sample_ = ReadInt<uint16>(data, kBitsPerSampleOffset);
- bytes_per_sample_ = bits_per_sample_ >> 3;
- bytes_per_frame_ = num_channels_ * bytes_per_sample_;
return true;
}
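
WavAudioHandler keeps the memcpy-based ReadInt<T>() helper for pulling little-endian fields out of the "fmt " chunk at fixed offsets. A standalone version of that parsing step is sketched below; the hard-coded chunk bytes are made up for illustration, and a real reader would also validate chunk ids and sizes as the code above does:

// Standalone sketch of reading "fmt " chunk fields at fixed offsets with
// memcpy, mirroring ReadInt<T>() above.
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <string>

template <typename T>
T ReadInt(const std::string& data, size_t offset) {
  // A real reader would CHECK that offset + sizeof(T) <= data.size().
  T result;
  std::memcpy(&result, data.data() + offset, sizeof(T));
  return result;  // Assumes a little-endian host, as the Chromium code does.
}

int main() {
  // A 16-byte PCM "fmt " chunk body: format=1, 2 channels, 48000 Hz,
  // byte rate 192000, block align 4, 16 bits per sample.
  const unsigned char fmt[16] = {
      0x01, 0x00, 0x02, 0x00, 0x80, 0xBB, 0x00, 0x00,
      0x00, 0xEE, 0x02, 0x00, 0x04, 0x00, 0x10, 0x00};
  const std::string chunk(reinterpret_cast<const char*>(fmt), sizeof(fmt));

  const uint16_t channels = ReadInt<uint16_t>(chunk, 2);          // kChannelOffset
  const uint32_t sample_rate = ReadInt<uint32_t>(chunk, 4);       // kSampleRateOffset
  const uint16_t bits_per_sample = ReadInt<uint16_t>(chunk, 14);  // kBitsPerSampleOffset
  std::printf("%u channels, %u Hz, %u bits\n",
              (unsigned)channels, (unsigned)sample_rate,
              (unsigned)bits_per_sample);                         // 2, 48000, 16
  return 0;
}
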
diff --git a/chromium/media/audio/sounds/wav_audio_handler.h b/chromium/media/audio/sounds/wav_audio_handler.h
index a2c3e023650..82b5cc5f842 100644
--- a/chromium/media/audio/sounds/wav_audio_handler.h
+++ b/chromium/media/audio/sounds/wav_audio_handler.h
@@ -6,6 +6,8 @@
#define MEDIA_AUDIO_SOUNDS_WAV_AUDIO_HANDLER_H_
#include "base/strings/string_piece.h"
+#include "base/time/time.h"
+#include "media/audio/audio_parameters.h"
#include "media/base/media_export.h"
namespace media {
@@ -27,11 +29,8 @@ class MEDIA_EXPORT WavAudioHandler {
// |bytes_written|. |bytes_written| should not be NULL.
bool CopyTo(AudioBus* bus, size_t cursor, size_t* bytes_written) const;
- int size() const { return data_.size(); }
- uint16 num_channels() const { return num_channels_; }
- uint32 sample_rate() const { return sample_rate_; }
- uint32 byte_rate() const { return byte_rate_; }
- uint16 bits_per_sample() const { return bits_per_sample_; }
+ const AudioParameters& params() const { return params_; }
+ const base::StringPiece& data() const { return data_; }
private:
// Parses a chunk of wav format data. Returns the length of the chunk.
@@ -46,12 +45,11 @@ class MEDIA_EXPORT WavAudioHandler {
// Data part of the |wav_data_|.
base::StringPiece data_;
+ AudioParameters params_;
+
uint16 num_channels_;
uint32 sample_rate_;
- uint32 byte_rate_;
uint16 bits_per_sample_;
- int bytes_per_sample_;
- int bytes_per_frame_;
};
} // namespace media
diff --git a/chromium/media/audio/sounds/wav_audio_handler_unittest.cc b/chromium/media/audio/sounds/wav_audio_handler_unittest.cc
index a7f8728be35..6098b9399e0 100644
--- a/chromium/media/audio/sounds/wav_audio_handler_unittest.cc
+++ b/chromium/media/audio/sounds/wav_audio_handler_unittest.cc
@@ -16,18 +16,22 @@ namespace media {
TEST(WavAudioHandlerTest, SampleDataTest) {
WavAudioHandler handler(base::StringPiece(kTestAudioData,
arraysize(kTestAudioData)));
- ASSERT_EQ(static_cast<uint16>(2), handler.num_channels());
- ASSERT_EQ(static_cast<uint16>(16), handler.bits_per_sample());
- ASSERT_EQ(static_cast<uint32>(48000), handler.sample_rate());
- ASSERT_EQ(static_cast<uint32>(96000), handler.byte_rate());
+ const AudioParameters& params = handler.params();
+ ASSERT_EQ(2, params.channels());
+ ASSERT_EQ(16, params.bits_per_sample());
+ ASSERT_EQ(48000, params.sample_rate());
+ ASSERT_EQ(192000, params.GetBytesPerSecond());
+
+ ASSERT_EQ(4U, handler.data().size());
+ const char kData[] = "\x01\x00\x01\x00";
+ ASSERT_EQ(base::StringPiece(kData, arraysize(kData) - 1), handler.data());
- ASSERT_EQ(4, handler.size());
scoped_ptr<AudioBus> bus = AudioBus::Create(
- handler.num_channels(),
- handler.size() / handler.num_channels());
+ params.channels(), handler.data().size() / params.channels());
+
size_t bytes_written;
ASSERT_TRUE(handler.CopyTo(bus.get(), 0, &bytes_written));
- ASSERT_EQ(static_cast<size_t>(handler.size()), bytes_written);
+ ASSERT_EQ(static_cast<size_t>(handler.data().size()), bytes_written);
}
} // namespace media
diff --git a/chromium/media/audio/test_audio_input_controller_factory.cc b/chromium/media/audio/test_audio_input_controller_factory.cc
index 3aeb7773366..4490dc9ac84 100644
--- a/chromium/media/audio/test_audio_input_controller_factory.cc
+++ b/chromium/media/audio/test_audio_input_controller_factory.cc
@@ -18,7 +18,7 @@ TestAudioInputController::TestAudioInputController(
audio_parameters_(audio_parameters),
factory_(factory),
event_handler_(event_handler) {
- message_loop_ = audio_manager->GetMessageLoop();
+ task_runner_ = audio_manager->GetTaskRunner();
}
TestAudioInputController::~TestAudioInputController() {
@@ -32,7 +32,7 @@ void TestAudioInputController::Record() {
}
void TestAudioInputController::Close(const base::Closure& closed_task) {
- message_loop_->PostTask(FROM_HERE, closed_task);
+ task_runner_->PostTask(FROM_HERE, closed_task);
if (factory_->delegate_)
factory_->delegate_->TestAudioControllerClosed(this);
}
diff --git a/chromium/media/audio/virtual_audio_input_stream.cc b/chromium/media/audio/virtual_audio_input_stream.cc
index 9c4e7a1f16f..f660b9c9521 100644
--- a/chromium/media/audio/virtual_audio_input_stream.cc
+++ b/chromium/media/audio/virtual_audio_input_stream.cc
@@ -8,8 +8,7 @@
#include <utility>
#include "base/bind.h"
-#include "base/message_loop/message_loop.h"
-#include "base/message_loop/message_loop_proxy.h"
+#include "base/single_thread_task_runner.h"
#include "media/audio/virtual_audio_output_stream.h"
namespace media {
@@ -50,18 +49,18 @@ class LoopbackAudioConverter : public AudioConverter::InputCallback {
VirtualAudioInputStream::VirtualAudioInputStream(
const AudioParameters& params,
- const scoped_refptr<base::MessageLoopProxy>& worker_loop,
+ const scoped_refptr<base::SingleThreadTaskRunner>& worker_task_runner,
const AfterCloseCallback& after_close_cb)
- : worker_loop_(worker_loop),
+ : worker_task_runner_(worker_task_runner),
after_close_cb_(after_close_cb),
callback_(NULL),
buffer_(new uint8[params.GetBytesPerBuffer()]),
params_(params),
mixer_(params_, params_, false),
num_attached_output_streams_(0),
- fake_consumer_(worker_loop_, params_) {
+ fake_consumer_(worker_task_runner_, params_) {
DCHECK(params_.IsValid());
- DCHECK(worker_loop_.get());
+ DCHECK(worker_task_runner_.get());
// VAIS can be constructed on any thread, but will DCHECK that all
// AudioInputStream methods are called from the same thread.
@@ -97,6 +96,7 @@ void VirtualAudioInputStream::Start(AudioInputCallback* callback) {
void VirtualAudioInputStream::Stop() {
DCHECK(thread_checker_.CalledOnValidThread());
fake_consumer_.Stop();
+ callback_ = NULL;
}
void VirtualAudioInputStream::AddOutputStream(
@@ -133,31 +133,19 @@ void VirtualAudioInputStream::RemoveOutputStream(
}
void VirtualAudioInputStream::PumpAudio(AudioBus* audio_bus) {
- DCHECK(worker_loop_->BelongsToCurrentThread());
- DCHECK(callback_);
+ DCHECK(worker_task_runner_->BelongsToCurrentThread());
{
base::AutoLock scoped_lock(converter_network_lock_);
mixer_.Convert(audio_bus);
}
- audio_bus->ToInterleaved(params_.frames_per_buffer(),
- params_.bits_per_sample() / 8,
- buffer_.get());
- callback_->OnData(this,
- buffer_.get(),
- params_.GetBytesPerBuffer(),
- params_.GetBytesPerBuffer(),
- 1.0);
+ callback_->OnData(this, audio_bus, params_.GetBytesPerBuffer(), 1.0);
}
void VirtualAudioInputStream::Close() {
DCHECK(thread_checker_.CalledOnValidThread());
Stop(); // Make sure callback_ is no longer being used.
- if (callback_) {
- callback_->OnClose(this);
- callback_ = NULL;
- }
// If a non-null AfterCloseCallback was provided to the constructor, invoke it
// here. The callback is moved to a stack-local first since |this| could be
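
With the new AudioInputCallback::OnData() signature used above, capture data is handed over as a planar AudioBus rather than as interleaved bytes, so PumpAudio() no longer needs the ToInterleaved() round trip. The conversion itself is just a stride copy; here is a self-contained sketch with plain vectors standing in for AudioBus (my simplification, and the symmetric 1/32768 scaling only approximates the scaling AudioBus actually uses):

// Interleaved 16-bit PCM <-> planar float, the conversion that
// FromInterleaved()/ToInterleaved() perform; vectors stand in for AudioBus.
#include <cstdint>
#include <cstdio>
#include <vector>

// Deinterleave: [L0 R0 L1 R1 ...] -> one float vector per channel in [-1, 1).
std::vector<std::vector<float>> FromInterleaved(const int16_t* in,
                                                int frames, int channels) {
  std::vector<std::vector<float>> planar(channels, std::vector<float>(frames));
  for (int f = 0; f < frames; ++f)
    for (int c = 0; c < channels; ++c)
      planar[c][f] = in[f * channels + c] / 32768.0f;
  return planar;
}

// Interleave back to 16-bit samples.
std::vector<int16_t> ToInterleaved(const std::vector<std::vector<float>>& planar,
                                   int frames) {
  const int channels = static_cast<int>(planar.size());
  std::vector<int16_t> out(frames * channels);
  for (int f = 0; f < frames; ++f)
    for (int c = 0; c < channels; ++c)
      out[f * channels + c] = static_cast<int16_t>(planar[c][f] * 32768.0f);
  return out;
}

int main() {
  const int16_t stereo[] = {1000, -1000, 2000, -2000};  // 2 frames, 2 channels.
  std::vector<std::vector<float>> bus = FromInterleaved(stereo, 2, 2);
  std::vector<int16_t> round_trip = ToInterleaved(bus, 2);
  for (int16_t s : round_trip)
    std::printf("%d ", s);  // 1000 -1000 2000 -2000
  std::printf("\n");
  return 0;
}
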
diff --git a/chromium/media/audio/virtual_audio_input_stream.h b/chromium/media/audio/virtual_audio_input_stream.h
index 53a10738732..17e2730d535 100644
--- a/chromium/media/audio/virtual_audio_input_stream.h
+++ b/chromium/media/audio/virtual_audio_input_stream.h
@@ -18,7 +18,7 @@
#include "media/base/audio_converter.h"
namespace base {
-class MessageLoopProxy;
+class SingleThreadTaskRunner;
}
namespace media {
@@ -36,12 +36,12 @@ class MEDIA_EXPORT VirtualAudioInputStream : public AudioInputStream {
AfterCloseCallback;
// Construct a target for audio loopback which mixes multiple data streams
- // into a single stream having the given |params|. |worker_loop| is the loop
- // on which AudioInputCallback methods are called and may or may not be the
- // single thread that invokes the AudioInputStream methods.
+ // into a single stream having the given |params|. |worker_task_runner| is
+ // the task runner on which AudioInputCallback methods are called and may or
+ // may not be the single thread that invokes the AudioInputStream methods.
VirtualAudioInputStream(
const AudioParameters& params,
- const scoped_refptr<base::MessageLoopProxy>& worker_loop,
+ const scoped_refptr<base::SingleThreadTaskRunner>& worker_task_runner,
const AfterCloseCallback& after_close_cb);
virtual ~VirtualAudioInputStream();
@@ -78,7 +78,7 @@ class MEDIA_EXPORT VirtualAudioInputStream : public AudioInputStream {
// Invoked on the worker thread.
void PumpAudio(AudioBus* audio_bus);
- const scoped_refptr<base::MessageLoopProxy> worker_loop_;
+ const scoped_refptr<base::SingleThreadTaskRunner> worker_task_runner_;
AfterCloseCallback after_close_cb_;
diff --git a/chromium/media/audio/virtual_audio_input_stream_unittest.cc b/chromium/media/audio/virtual_audio_input_stream_unittest.cc
index aab67cca571..3aa87b0a179 100644
--- a/chromium/media/audio/virtual_audio_input_stream_unittest.cc
+++ b/chromium/media/audio/virtual_audio_input_stream_unittest.cc
@@ -6,7 +6,6 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
-#include "base/message_loop/message_loop.h"
#include "base/rand_util.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread.h"
@@ -33,17 +32,17 @@ class MockInputCallback : public AudioInputStream::AudioInputCallback {
public:
MockInputCallback()
: data_pushed_(false, false) {
- ON_CALL(*this, OnData(_, _, _, _, _))
- .WillByDefault(InvokeWithoutArgs(&data_pushed_,
- &base::WaitableEvent::Signal));
+ ON_CALL(*this, OnData(_, _, _, _)).WillByDefault(
+ InvokeWithoutArgs(&data_pushed_, &base::WaitableEvent::Signal));
}
virtual ~MockInputCallback() {}
- MOCK_METHOD5(OnData, void(AudioInputStream* stream, const uint8* data,
- uint32 size, uint32 hardware_delay_bytes,
- double volume));
- MOCK_METHOD1(OnClose, void(AudioInputStream* stream));
+ MOCK_METHOD4(OnData,
+ void(AudioInputStream* stream,
+ const AudioBus* source,
+ uint32 hardware_delay_bytes,
+ double volume));
MOCK_METHOD1(OnError, void(AudioInputStream* stream));
void WaitForDataPushes() {
@@ -74,15 +73,6 @@ class TestAudioSource : public SineWaveAudioSource {
return ret;
}
- virtual int OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState audio_buffers) OVERRIDE {
- const int ret =
- SineWaveAudioSource::OnMoreIOData(source, dest, audio_buffers);
- data_pulled_.Signal();
- return ret;
- }
-
void WaitForDataPulls() {
for (int i = 0; i < 3; ++i) {
data_pulled_.Wait();
@@ -105,7 +95,7 @@ class VirtualAudioInputStreamTest : public testing::TestWithParam<bool> {
stream_(NULL),
closed_stream_(false, false) {
audio_thread_->Start();
- audio_message_loop_ = audio_thread_->message_loop_proxy();
+ audio_task_runner_ = audio_thread_->message_loop_proxy();
}
virtual ~VirtualAudioInputStreamTest() {
@@ -118,15 +108,13 @@ class VirtualAudioInputStreamTest : public testing::TestWithParam<bool> {
void Create() {
const bool worker_is_separate_thread = GetParam();
stream_ = new VirtualAudioInputStream(
- kParams, GetWorkerLoop(worker_is_separate_thread),
+ kParams, GetWorkerTaskRunner(worker_is_separate_thread),
base::Bind(&base::DeletePointer<VirtualAudioInputStream>));
stream_->Open();
}
void Start() {
- EXPECT_CALL(input_callback_, OnClose(_));
- EXPECT_CALL(input_callback_, OnData(_, NotNull(), _, _, _))
- .Times(AtLeast(1));
+ EXPECT_CALL(input_callback_, OnData(_, NotNull(), _, _)).Times(AtLeast(1));
ASSERT_TRUE(!!stream_);
stream_->Start(&input_callback_);
@@ -209,36 +197,36 @@ class VirtualAudioInputStreamTest : public testing::TestWithParam<bool> {
stopped_output_streams_.clear();
}
- const scoped_refptr<base::MessageLoopProxy>& audio_message_loop() const {
- return audio_message_loop_;
+ const scoped_refptr<base::SingleThreadTaskRunner>& audio_task_runner() const {
+ return audio_task_runner_;
}
- const scoped_refptr<base::MessageLoopProxy>& GetWorkerLoop(
+ const scoped_refptr<base::SingleThreadTaskRunner>& GetWorkerTaskRunner(
bool worker_is_separate_thread) {
if (worker_is_separate_thread) {
if (!worker_thread_->IsRunning()) {
worker_thread_->Start();
- worker_message_loop_ = worker_thread_->message_loop_proxy();
+ worker_task_runner_ = worker_thread_->message_loop_proxy();
}
- return worker_message_loop_;
+ return worker_task_runner_;
} else {
- return audio_message_loop_;
+ return audio_task_runner_;
}
}
private:
void SyncWithAudioThread() {
base::WaitableEvent done(false, false);
- audio_message_loop_->PostTask(
+ audio_task_runner_->PostTask(
FROM_HERE,
base::Bind(&base::WaitableEvent::Signal, base::Unretained(&done)));
done.Wait();
}
scoped_ptr<base::Thread> audio_thread_;
- scoped_refptr<base::MessageLoopProxy> audio_message_loop_;
+ scoped_refptr<base::SingleThreadTaskRunner> audio_task_runner_;
scoped_ptr<base::Thread> worker_thread_;
- scoped_refptr<base::MessageLoopProxy> worker_message_loop_;
+ scoped_refptr<base::SingleThreadTaskRunner> worker_task_runner_;
VirtualAudioInputStream* stream_;
MockInputCallback input_callback_;
@@ -252,7 +240,7 @@ class VirtualAudioInputStreamTest : public testing::TestWithParam<bool> {
};
#define RUN_ON_AUDIO_THREAD(method) \
- audio_message_loop()->PostTask( \
+ audio_task_runner()->PostTask( \
FROM_HERE, base::Bind(&VirtualAudioInputStreamTest::method, \
base::Unretained(this)))
diff --git a/chromium/media/audio/virtual_audio_output_stream_unittest.cc b/chromium/media/audio/virtual_audio_output_stream_unittest.cc
index 1e3abd1c6bb..72e794d6feb 100644
--- a/chromium/media/audio/virtual_audio_output_stream_unittest.cc
+++ b/chromium/media/audio/virtual_audio_output_stream_unittest.cc
@@ -4,8 +4,6 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
-#include "base/message_loop/message_loop.h"
-#include "base/message_loop/message_loop_proxy.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread.h"
#include "media/audio/audio_manager.h"
@@ -27,10 +25,10 @@ const AudioParameters kParams(
class MockVirtualAudioInputStream : public VirtualAudioInputStream {
public:
explicit MockVirtualAudioInputStream(
- const scoped_refptr<base::MessageLoopProxy>& worker_loop)
+ const scoped_refptr<base::SingleThreadTaskRunner>& worker_task_runner)
: VirtualAudioInputStream(
kParams,
- worker_loop,
+ worker_task_runner,
base::Bind(&base::DeletePointer<VirtualAudioInputStream>)) {}
~MockVirtualAudioInputStream() {}
@@ -53,16 +51,16 @@ class VirtualAudioOutputStreamTest : public testing::Test {
VirtualAudioOutputStreamTest()
: audio_thread_(new base::Thread("AudioThread")) {
audio_thread_->Start();
- audio_message_loop_ = audio_thread_->message_loop_proxy();
+ audio_task_runner_ = audio_thread_->message_loop_proxy();
}
- const scoped_refptr<base::MessageLoopProxy>& audio_message_loop() const {
- return audio_message_loop_;
+ const scoped_refptr<base::SingleThreadTaskRunner>& audio_task_runner() const {
+ return audio_task_runner_;
}
void SyncWithAudioThread() {
base::WaitableEvent done(false, false);
- audio_message_loop()->PostTask(
+ audio_task_runner()->PostTask(
FROM_HERE, base::Bind(&base::WaitableEvent::Signal,
base::Unretained(&done)));
done.Wait();
@@ -70,7 +68,7 @@ class VirtualAudioOutputStreamTest : public testing::Test {
private:
scoped_ptr<base::Thread> audio_thread_;
- scoped_refptr<base::MessageLoopProxy> audio_message_loop_;
+ scoped_refptr<base::SingleThreadTaskRunner> audio_task_runner_;
DISALLOW_COPY_AND_ASSIGN(VirtualAudioOutputStreamTest);
};
@@ -79,8 +77,8 @@ TEST_F(VirtualAudioOutputStreamTest, StartStopStartStop) {
static const int kCycles = 3;
MockVirtualAudioInputStream* const input_stream =
- new MockVirtualAudioInputStream(audio_message_loop());
- audio_message_loop()->PostTask(
+ new MockVirtualAudioInputStream(audio_task_runner());
+ audio_task_runner()->PostTask(
FROM_HERE, base::Bind(
base::IgnoreResult(&MockVirtualAudioInputStream::Open),
base::Unretained(input_stream)));
@@ -95,24 +93,24 @@ TEST_F(VirtualAudioOutputStreamTest, StartStopStartStop) {
EXPECT_CALL(*input_stream, RemoveOutputStream(output_stream, _))
.Times(kCycles);
- audio_message_loop()->PostTask(
+ audio_task_runner()->PostTask(
FROM_HERE, base::Bind(base::IgnoreResult(&VirtualAudioOutputStream::Open),
base::Unretained(output_stream)));
SineWaveAudioSource source(CHANNEL_LAYOUT_STEREO, 200.0, 128);
for (int i = 0; i < kCycles; ++i) {
- audio_message_loop()->PostTask(
+ audio_task_runner()->PostTask(
FROM_HERE, base::Bind(&VirtualAudioOutputStream::Start,
base::Unretained(output_stream),
&source));
- audio_message_loop()->PostTask(
+ audio_task_runner()->PostTask(
FROM_HERE, base::Bind(&VirtualAudioOutputStream::Stop,
base::Unretained(output_stream)));
}
- audio_message_loop()->PostTask(
+ audio_task_runner()->PostTask(
FROM_HERE, base::Bind(&VirtualAudioOutputStream::Close,
base::Unretained(output_stream)));
- audio_message_loop()->PostTask(
+ audio_task_runner()->PostTask(
FROM_HERE, base::Bind(&MockVirtualAudioInputStream::Close,
base::Unretained(input_stream)));
diff --git a/chromium/media/audio/win/audio_device_listener_win.cc b/chromium/media/audio/win/audio_device_listener_win.cc
index adbc9a82e4d..ecf83874867 100644
--- a/chromium/media/audio/win/audio_device_listener_win.cc
+++ b/chromium/media/audio/win/audio_device_listener_win.cc
@@ -30,6 +30,27 @@ static std::string RoleToString(ERole role) {
}
}
+static std::string GetDeviceId(EDataFlow flow, ERole role) {
+ ScopedComPtr<IMMDevice> device =
+ CoreAudioUtil::CreateDefaultDevice(flow, role);
+ if (!device) {
+ // Most probable reason for ending up here is that all audio devices are
+ // disabled or unplugged.
+ DVLOG(1) << "CoreAudioUtil::CreateDefaultDevice failed. No device?";
+ return std::string();
+ }
+
+ AudioDeviceName device_name;
+ HRESULT hr = CoreAudioUtil::GetDeviceName(device, &device_name);
+ if (FAILED(hr)) {
+ DVLOG(1) << "Failed to retrieve the device id: " << std::hex << hr;
+ return std::string();
+ }
+
+ return device_name.unique_id;
+}
+
AudioDeviceListenerWin::AudioDeviceListenerWin(const base::Closure& listener_cb)
: listener_cb_(listener_cb) {
CHECK(CoreAudioUtil::IsSupported());
@@ -48,22 +69,12 @@ AudioDeviceListenerWin::AudioDeviceListenerWin(const base::Closure& listener_cb)
device_enumerator_ = device_enumerator;
- ScopedComPtr<IMMDevice> device =
- CoreAudioUtil::CreateDefaultDevice(eRender, eConsole);
- if (!device) {
- // Most probable reason for ending up here is that all audio devices are
- // disabled or unplugged.
- VLOG(1) << "CoreAudioUtil::CreateDefaultDevice failed. No device?";
- return;
- }
-
- AudioDeviceName device_name;
- hr = CoreAudioUtil::GetDeviceName(device, &device_name);
- if (FAILED(hr)) {
- VLOG(1) << "Failed to retrieve the device id: " << std::hex << hr;
- return;
- }
- default_render_device_id_ = device_name.unique_id;
+ default_render_device_id_ = GetDeviceId(eRender, eConsole);
+ default_capture_device_id_ = GetDeviceId(eCapture, eConsole);
+ default_communications_render_device_id_ =
+ GetDeviceId(eRender, eCommunications);
+ default_communications_capture_device_id_ =
+ GetDeviceId(eCapture, eCommunications);
}
AudioDeviceListenerWin::~AudioDeviceListenerWin() {
@@ -126,14 +137,29 @@ STDMETHODIMP AudioDeviceListenerWin::OnDeviceStateChanged(LPCWSTR device_id,
STDMETHODIMP AudioDeviceListenerWin::OnDefaultDeviceChanged(
EDataFlow flow, ERole role, LPCWSTR new_default_device_id) {
- // Only listen for output device changes right now...
- if (flow != eConsole && role != eRender)
+ // Only listen for console and communication device changes.
+ if ((role != eConsole && role != eCommunications) ||
+ (flow != eRender && flow != eCapture)) {
return S_OK;
+ }
+
+ // Grab a pointer to the appropriate ID member.
+ // Note that there are three "?:"'s here to select the right ID.
+ std::string* current_device_id =
+ flow == eRender ? (
+ role == eConsole ?
+ &default_render_device_id_ :
+ &default_communications_render_device_id_
+ ) : (
+ role == eConsole ?
+ &default_capture_device_id_ :
+ &default_communications_capture_device_id_
+ );
// If no device is now available, |new_default_device_id| will be NULL.
std::string new_device_id;
if (new_default_device_id)
- new_device_id = WideToUTF8(new_default_device_id);
+ new_device_id = base::WideToUTF8(new_default_device_id);
VLOG(1) << "OnDefaultDeviceChanged() "
<< "new_default_device: "
@@ -146,10 +172,11 @@ STDMETHODIMP AudioDeviceListenerWin::OnDefaultDeviceChanged(
// TODO(dalecurtis): This still seems to fire an extra event on my machine for
// an unplug event (probably others too); e.g., we get two transitions to a
// new default device id.
- if (new_device_id.compare(default_render_device_id_) == 0)
+ if (new_device_id.compare(*current_device_id) == 0)
return S_OK;
- default_render_device_id_ = new_device_id;
+ // Store the new id in the member variable (that current_device_id points to).
+ *current_device_id = new_device_id;
listener_cb_.Run();
return S_OK;
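
OnDefaultDeviceChanged() above now tracks four default-device ids, one per (data flow, role) combination, and only fires the listener callback when the id for that slot actually changes. That bookkeeping can be modelled in isolation like this; the 2x2 array and enum names are mine, mirroring the eRender/eCapture and eConsole/eCommunications constants used above:

// Standalone model of the default-device bookkeeping: one cached id per
// (data flow, role) combination, updated when a change notification arrives.
#include <cstdio>
#include <string>

enum DataFlow { kRender = 0, kCapture = 1 };      // Mirrors eRender/eCapture.
enum Role { kConsole = 0, kCommunications = 1 };  // Mirrors eConsole/eCommunications.

struct DefaultDeviceIds {
  std::string ids[2][2];  // Indexed as [flow][role].

  // Returns true (i.e. "notify listeners") only when the id really changed,
  // matching the early return in OnDefaultDeviceChanged().
  bool OnDefaultDeviceChanged(DataFlow flow, Role role,
                              const std::string& new_id) {
    std::string* current = &ids[flow][role];
    if (*current == new_id)
      return false;
    *current = new_id;
    return true;
  }
};

int main() {
  DefaultDeviceIds ids;
  std::printf("%d\n", ids.OnDefaultDeviceChanged(kRender, kConsole, "spk-1"));  // 1
  std::printf("%d\n", ids.OnDefaultDeviceChanged(kRender, kConsole, "spk-1"));  // 0 (duplicate)
  std::printf("%d\n", ids.OnDefaultDeviceChanged(kCapture, kCommunications,
                                                 "mic-2"));                     // 1
  return 0;
}
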
diff --git a/chromium/media/audio/win/audio_device_listener_win.h b/chromium/media/audio/win/audio_device_listener_win.h
index 6a312519af9..92777a12a0d 100644
--- a/chromium/media/audio/win/audio_device_listener_win.h
+++ b/chromium/media/audio/win/audio_device_listener_win.h
@@ -49,6 +49,9 @@ class MEDIA_EXPORT AudioDeviceListenerWin : public IMMNotificationClient {
base::Closure listener_cb_;
ScopedComPtr<IMMDeviceEnumerator> device_enumerator_;
std::string default_render_device_id_;
+ std::string default_capture_device_id_;
+ std::string default_communications_render_device_id_;
+ std::string default_communications_capture_device_id_;
// AudioDeviceListenerWin must be constructed and destructed on one thread.
base::ThreadChecker thread_checker_;
diff --git a/chromium/media/audio/win/audio_device_listener_win_unittest.cc b/chromium/media/audio/win/audio_device_listener_win_unittest.cc
index 3076fff2513..14b70a8fe90 100644
--- a/chromium/media/audio/win/audio_device_listener_win_unittest.cc
+++ b/chromium/media/audio/win/audio_device_listener_win_unittest.cc
@@ -47,7 +47,7 @@ class AudioDeviceListenerWinTest : public testing::Test {
bool SimulateDefaultOutputDeviceChange(const char* new_device_id) {
return output_device_listener_->OnDefaultDeviceChanged(
static_cast<EDataFlow>(eConsole), static_cast<ERole>(eRender),
- ASCIIToWide(new_device_id).c_str()) == S_OK;
+ base::ASCIIToWide(new_device_id).c_str()) == S_OK;
}
void SetOutputDeviceId(std::string new_device_id) {
diff --git a/chromium/media/audio/win/audio_low_latency_input_win.cc b/chromium/media/audio/win/audio_low_latency_input_win.cc
index b16ef130a9f..c43ed22977c 100644
--- a/chromium/media/audio/win/audio_low_latency_input_win.cc
+++ b/chromium/media/audio/win/audio_low_latency_input_win.cc
@@ -9,22 +9,48 @@
#include "base/strings/utf_string_conversions.h"
#include "media/audio/win/audio_manager_win.h"
#include "media/audio/win/avrt_wrapper_win.h"
+#include "media/base/audio_bus.h"
using base::win::ScopedComPtr;
using base::win::ScopedCOMInitializer;
namespace media {
+namespace {
+
+// Returns true if |device| represents the default communication capture device.
+bool IsDefaultCommunicationDevice(IMMDeviceEnumerator* enumerator,
+ IMMDevice* device) {
+ ScopedComPtr<IMMDevice> communications;
+ if (FAILED(enumerator->GetDefaultAudioEndpoint(eCapture, eCommunications,
+ communications.Receive()))) {
+ return false;
+ }
+
+ base::win::ScopedCoMem<WCHAR> communications_id, device_id;
+ device->GetId(&device_id);
+ communications->GetId(&communications_id);
+ return lstrcmpW(communications_id, device_id) == 0;
+}
-WASAPIAudioInputStream::WASAPIAudioInputStream(
- AudioManagerWin* manager, const AudioParameters& params,
- const std::string& device_id)
+} // namespace
+
+WASAPIAudioInputStream::WASAPIAudioInputStream(AudioManagerWin* manager,
+ const AudioParameters& params,
+ const std::string& device_id)
: manager_(manager),
capture_thread_(NULL),
opened_(false),
started_(false),
+ frame_size_(0),
+ packet_size_frames_(0),
+ packet_size_bytes_(0),
endpoint_buffer_size_frames_(0),
+ effects_(params.effects()),
device_id_(device_id),
- sink_(NULL) {
+ perf_count_to_100ns_units_(0.0),
+ ms_to_frame_count_(0.0),
+ sink_(NULL),
+ audio_bus_(media::AudioBus::Create(params)) {
DCHECK(manager_);
// Load the Avrt DLL if not already loaded. Required to support MMCSS.
@@ -67,8 +93,7 @@ WASAPIAudioInputStream::WASAPIAudioInputStream(
perf_count_to_100ns_units_ =
(10000000.0 / static_cast<double>(performance_frequency.QuadPart));
} else {
- LOG(ERROR) << "High-resolution performance counters are not supported.";
- perf_count_to_100ns_units_ = 0.0;
+ DLOG(ERROR) << "High-resolution performance counters are not supported.";
}
}
@@ -123,6 +148,7 @@ void WASAPIAudioInputStream::Start(AudioInputCallback* callback) {
if (started_)
return;
+ DCHECK(!sink_);
sink_ = callback;
// Starts periodic AGC microphone measurements if the AGC has been enabled
@@ -173,6 +199,7 @@ void WASAPIAudioInputStream::Stop() {
}
started_ = false;
+ sink_ = NULL;
}
void WASAPIAudioInputStream::Close() {
@@ -180,10 +207,6 @@ void WASAPIAudioInputStream::Close() {
// It is valid to call Close() before calling open or Start().
// It is also valid to call Close() after Start() has been called.
Stop();
- if (sink_) {
- sink_->OnClose(this);
- sink_ = NULL;
- }
// Inform the audio manager that we have been closed. This will cause our
// destruction.
@@ -240,30 +263,32 @@ double WASAPIAudioInputStream::GetVolume() {
}
// static
-int WASAPIAudioInputStream::HardwareSampleRate(
+AudioParameters WASAPIAudioInputStream::GetInputStreamParameters(
const std::string& device_id) {
- base::win::ScopedCoMem<WAVEFORMATEX> audio_engine_mix_format;
- HRESULT hr = GetMixFormat(device_id, &audio_engine_mix_format);
- if (FAILED(hr))
- return 0;
+ int sample_rate = 48000;
+ ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
- return static_cast<int>(audio_engine_mix_format->nSamplesPerSec);
-}
-
-// static
-uint32 WASAPIAudioInputStream::HardwareChannelCount(
- const std::string& device_id) {
base::win::ScopedCoMem<WAVEFORMATEX> audio_engine_mix_format;
- HRESULT hr = GetMixFormat(device_id, &audio_engine_mix_format);
- if (FAILED(hr))
- return 0;
+ int effects = AudioParameters::NO_EFFECTS;
+ if (SUCCEEDED(GetMixFormat(device_id, &audio_engine_mix_format, &effects))) {
+ sample_rate = static_cast<int>(audio_engine_mix_format->nSamplesPerSec);
+ channel_layout = audio_engine_mix_format->nChannels == 1 ?
+ CHANNEL_LAYOUT_MONO : CHANNEL_LAYOUT_STEREO;
+ }
- return static_cast<uint32>(audio_engine_mix_format->nChannels);
+ // Use 10ms frame size as default.
+ int frames_per_buffer = sample_rate / 100;
+ return AudioParameters(
+ AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout, 0, sample_rate,
+ 16, frames_per_buffer, effects);
}
// static
HRESULT WASAPIAudioInputStream::GetMixFormat(const std::string& device_id,
- WAVEFORMATEX** device_format) {
+ WAVEFORMATEX** device_format,
+ int* effects) {
+ DCHECK(effects);
+
// It is assumed that this static method is called from a COM thread, i.e.,
// CoInitializeEx() is not called here to avoid STA/MTA conflicts.
ScopedComPtr<IMMDeviceEnumerator> enumerator;
@@ -278,18 +303,22 @@ HRESULT WASAPIAudioInputStream::GetMixFormat(const std::string& device_id,
hr = enumerator->GetDefaultAudioEndpoint(eCapture, eConsole,
endpoint_device.Receive());
} else if (device_id == AudioManagerBase::kLoopbackInputDeviceId) {
- // Capture the default playback stream.
+ // Get the mix format of the default playback stream.
hr = enumerator->GetDefaultAudioEndpoint(eRender, eConsole,
endpoint_device.Receive());
} else {
// Retrieve a capture endpoint device that is specified by an endpoint
// device-identification string.
- hr = enumerator->GetDevice(UTF8ToUTF16(device_id).c_str(),
+ hr = enumerator->GetDevice(base::UTF8ToUTF16(device_id).c_str(),
endpoint_device.Receive());
}
+
if (FAILED(hr))
return hr;
+ *effects = IsDefaultCommunicationDevice(enumerator, endpoint_device) ?
+ AudioParameters::DUCKING : AudioParameters::NO_EFFECTS;
+
ScopedComPtr<IAudioClient> audio_client;
hr = endpoint_device->Activate(__uuidof(IAudioClient),
CLSCTX_INPROC_SERVER,
@@ -408,16 +437,15 @@ void WASAPIAudioInputStream::Run() {
// size which was specified at construction.
uint32 delay_frames = static_cast<uint32>(audio_delay_frames + 0.5);
while (buffer_frame_index >= packet_size_frames_) {
- uint8* audio_data =
- reinterpret_cast<uint8*>(capture_buffer.get());
+ // Copy data to audio bus to match the OnData interface.
+ uint8* audio_data = reinterpret_cast<uint8*>(capture_buffer.get());
+ audio_bus_->FromInterleaved(
+ audio_data, audio_bus_->frames(), format_.wBitsPerSample / 8);
// Deliver data packet, delay estimation and volume level to
// the user.
- sink_->OnData(this,
- audio_data,
- packet_size_bytes_,
- delay_frames * frame_size_,
- volume);
+ sink_->OnData(
+ this, audio_bus_.get(), delay_frames * frame_size_, volume);
// Store parts of the recorded data which can't be delivered
// using the current packet size. The stored section will be used
@@ -458,6 +486,8 @@ void WASAPIAudioInputStream::HandleError(HRESULT err) {
}
HRESULT WASAPIAudioInputStream::SetCaptureDevice() {
+ DCHECK(!endpoint_device_);
+
ScopedComPtr<IMMDeviceEnumerator> enumerator;
HRESULT hr = enumerator.CreateInstance(__uuidof(MMDeviceEnumerator),
NULL, CLSCTX_INPROC_SERVER);
@@ -466,22 +496,42 @@ HRESULT WASAPIAudioInputStream::SetCaptureDevice() {
// Retrieve the IMMDevice by using the specified role or the specified
// unique endpoint device-identification string.
- // TODO(henrika): possibly add support for the eCommunications as well.
- if (device_id_ == AudioManagerBase::kDefaultDeviceId) {
- // Retrieve the default capture audio endpoint for the specified role.
- // Note that, in Windows Vista, the MMDevice API supports device roles
- // but the system-supplied user interface programs do not.
- hr = enumerator->GetDefaultAudioEndpoint(eCapture, eConsole,
- endpoint_device_.Receive());
- } else if (device_id_ == AudioManagerBase::kLoopbackInputDeviceId) {
- // Capture the default playback stream.
- hr = enumerator->GetDefaultAudioEndpoint(eRender, eConsole,
+
+ if (effects_ & AudioParameters::DUCKING) {
+ // Ducking has been requested and it is only supported for the default
+ // communication device. So, let's open up the communication device and
+ // see if the ID of that device matches the requested ID.
+ // We treat both kDefaultDeviceId and an explicitly matching device id
+ // as valid matches.
+ hr = enumerator->GetDefaultAudioEndpoint(eCapture, eCommunications,
endpoint_device_.Receive());
- } else {
- // Retrieve a capture endpoint device that is specified by an endpoint
- // device-identification string.
- hr = enumerator->GetDevice(UTF8ToUTF16(device_id_).c_str(),
- endpoint_device_.Receive());
+ if (endpoint_device_ && device_id_ != AudioManagerBase::kDefaultDeviceId) {
+ base::win::ScopedCoMem<WCHAR> communications_id;
+ endpoint_device_->GetId(&communications_id);
+ if (device_id_ !=
+ base::WideToUTF8(static_cast<WCHAR*>(communications_id))) {
+ DLOG(WARNING) << "Ducking has been requested for a non-default device. "
+ "Not supported.";
+ endpoint_device_.Release(); // Fall back on code below.
+ }
+ }
+ }
+
+ if (!endpoint_device_) {
+ if (device_id_ == AudioManagerBase::kDefaultDeviceId) {
+ // Retrieve the default capture audio endpoint for the specified role.
+ // Note that, in Windows Vista, the MMDevice API supports device roles
+ // but the system-supplied user interface programs do not.
+ hr = enumerator->GetDefaultAudioEndpoint(eCapture, eConsole,
+ endpoint_device_.Receive());
+ } else if (device_id_ == AudioManagerBase::kLoopbackInputDeviceId) {
+ // Capture the default playback stream.
+ hr = enumerator->GetDefaultAudioEndpoint(eRender, eConsole,
+ endpoint_device_.Receive());
+ } else {
+ hr = enumerator->GetDevice(base::UTF8ToUTF16(device_id_).c_str(),
+ endpoint_device_.Receive());
+ }
}
if (FAILED(hr))
diff --git a/chromium/media/audio/win/audio_low_latency_input_win.h b/chromium/media/audio/win/audio_low_latency_input_win.h
index 99e1604925a..a33a582c976 100644
--- a/chromium/media/audio/win/audio_low_latency_input_win.h
+++ b/chromium/media/audio/win/audio_low_latency_input_win.h
@@ -75,6 +75,7 @@
namespace media {
+class AudioBus;
class AudioManagerWin;
// AudioInputStream implementation using Windows Core Audio APIs.
@@ -88,6 +89,7 @@ class MEDIA_EXPORT WASAPIAudioInputStream
WASAPIAudioInputStream(AudioManagerWin* manager,
const AudioParameters& params,
const std::string& device_id);
+
// The dtor is typically called by the AudioManager only and it is usually
// triggered by calling AudioInputStream::Close().
virtual ~WASAPIAudioInputStream();
@@ -101,16 +103,11 @@ class MEDIA_EXPORT WASAPIAudioInputStream
virtual void SetVolume(double volume) OVERRIDE;
virtual double GetVolume() OVERRIDE;
- // Retrieves the sample rate used by the audio engine for its internal
- // processing/mixing of shared-mode streams given a specifed device.
- static int HardwareSampleRate(const std::string& device_id);
-
- // Retrieves the number of audio channels used by the audio engine for its
- // internal processing/mixing of shared-mode streams given a specified device.
- static uint32 HardwareChannelCount(const std::string& device_id);
-
bool started() const { return started_; }
+ // Returns the default hardware audio parameters of the specified device.
+ static AudioParameters GetInputStreamParameters(const std::string& device_id);
+
private:
// DelegateSimpleThread::Delegate implementation.
virtual void Run() OVERRIDE;
@@ -127,8 +124,11 @@ class MEDIA_EXPORT WASAPIAudioInputStream
// Retrieves the stream format that the audio engine uses for its internal
// processing/mixing of shared-mode streams.
+ // |effects| is an AudioParameters::effects() flag that will have the
+ // DUCKING flag raised for only the default communication device.
static HRESULT GetMixFormat(const std::string& device_id,
- WAVEFORMATEX** device_format);
+ WAVEFORMATEX** device_format,
+ int* effects);
// Our creator, the audio manager needs to be notified when we close.
AudioManagerWin* manager_;
@@ -157,6 +157,9 @@ class MEDIA_EXPORT WASAPIAudioInputStream
// Length of the audio endpoint buffer.
uint32 endpoint_buffer_size_frames_;
+ // A copy of the supplied AudioParameter's |effects|.
+ const int effects_;
+
// Contains the unique name of the selected endpoint device.
// Note that AudioManagerBase::kDefaultDeviceId represents the default
// device role and is not a valid ID as such.
@@ -178,7 +181,7 @@ class MEDIA_EXPORT WASAPIAudioInputStream
// An IMMDevice interface which represents an audio endpoint device.
base::win::ScopedComPtr<IMMDevice> endpoint_device_;
- // Windows Audio Session API (WASAP) interfaces.
+ // Windows Audio Session API (WASAPI) interfaces.
// An IAudioClient interface which enables a client to create and initialize
// an audio stream between an audio application and the audio engine.
@@ -209,6 +212,10 @@ class MEDIA_EXPORT WASAPIAudioInputStream
// This event will be signaled when capturing shall stop.
base::win::ScopedHandle stop_capture_event_;
+ // Extra audio bus used for storage of deinterleaved data for the OnData
+ // callback.
+ scoped_ptr<media::AudioBus> audio_bus_;
+
DISALLOW_COPY_AND_ASSIGN(WASAPIAudioInputStream);
};
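
SetCaptureDevice() in the .cc change above first tries the default communications endpoint when DUCKING was requested, accepts it only if the caller asked for the default device or for that exact id, and otherwise falls back to the normal lookup. A non-COM model of that decision (the Enumerator/Endpoint types and device ids below are stand-ins I made up, not the Windows APIs):

// Plain-C++ model of the ducking fallback in SetCaptureDevice(): prefer the
// default communications capture device when ducking is requested, but only
// if it matches the id the caller asked for. All types here are illustrative.
#include <cstdio>
#include <string>

struct Endpoint { std::string id; };

struct Enumerator {
  Endpoint default_console{"console-mic"};
  Endpoint default_communications{"comms-mic"};
  Endpoint* GetDefault(bool communications) {
    return communications ? &default_communications : &default_console;
  }
  Endpoint* GetById(const std::string& id) {
    if (id == default_console.id) return &default_console;
    if (id == default_communications.id) return &default_communications;
    return nullptr;
  }
};

const char kDefaultDeviceId[] = "default";

Endpoint* SelectCaptureDevice(Enumerator* enumerator,
                              const std::string& device_id,
                              bool ducking_requested) {
  Endpoint* endpoint = nullptr;
  if (ducking_requested) {
    endpoint = enumerator->GetDefault(/*communications=*/true);
    // Ducking is only supported on the default communications device; if the
    // caller named a different device, fall back to the normal lookup below.
    if (endpoint && device_id != kDefaultDeviceId && device_id != endpoint->id)
      endpoint = nullptr;
  }
  if (!endpoint) {
    endpoint = device_id == kDefaultDeviceId
                   ? enumerator->GetDefault(/*communications=*/false)
                   : enumerator->GetById(device_id);
  }
  return endpoint;
}

int main() {
  Enumerator e;
  std::printf("%s\n", SelectCaptureDevice(&e, "default", true)->id.c_str());      // comms-mic
  std::printf("%s\n", SelectCaptureDevice(&e, "console-mic", true)->id.c_str());  // console-mic
  std::printf("%s\n", SelectCaptureDevice(&e, "console-mic", false)->id.c_str()); // console-mic
  return 0;
}
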
diff --git a/chromium/media/audio/win/audio_low_latency_input_win_unittest.cc b/chromium/media/audio/win/audio_low_latency_input_win_unittest.cc
index 54bd3f71b26..eee18873f6c 100644
--- a/chromium/media/audio/win/audio_low_latency_input_win_unittest.cc
+++ b/chromium/media/audio/win/audio_low_latency_input_win_unittest.cc
@@ -38,24 +38,23 @@ ACTION_P3(CheckCountAndPostQuitTask, count, limit, loop) {
class MockAudioInputCallback : public AudioInputStream::AudioInputCallback {
public:
- MOCK_METHOD5(OnData, void(AudioInputStream* stream,
- const uint8* src, uint32 size,
- uint32 hardware_delay_bytes, double volume));
- MOCK_METHOD1(OnClose, void(AudioInputStream* stream));
+ MOCK_METHOD4(OnData,
+ void(AudioInputStream* stream,
+ const AudioBus* src,
+ uint32 hardware_delay_bytes,
+ double volume));
MOCK_METHOD1(OnError, void(AudioInputStream* stream));
};
class FakeAudioInputCallback : public AudioInputStream::AudioInputCallback {
public:
FakeAudioInputCallback()
- : closed_(false),
- error_(false),
- data_event_(false, false) {
- }
+ : error_(false),
+ data_event_(false, false),
+ num_received_audio_frames_(0) {}
- const std::vector<uint8>& received_data() const { return received_data_; }
- bool closed() const { return closed_; }
bool error() const { return error_; }
+ int num_received_audio_frames() const { return num_received_audio_frames_; }
// Waits until OnData() is called on another thread.
void WaitForData() {
@@ -63,24 +62,21 @@ class FakeAudioInputCallback : public AudioInputStream::AudioInputCallback {
}
virtual void OnData(AudioInputStream* stream,
- const uint8* src, uint32 size,
- uint32 hardware_delay_bytes, double volume) OVERRIDE {
- received_data_.insert(received_data_.end(), src, src + size);
+ const AudioBus* src,
+ uint32 hardware_delay_bytes,
+ double volume) OVERRIDE {
+ EXPECT_NE(hardware_delay_bytes, 0u);
+ num_received_audio_frames_ += src->frames();
data_event_.Signal();
}
- virtual void OnClose(AudioInputStream* stream) OVERRIDE {
- closed_ = true;
- }
-
virtual void OnError(AudioInputStream* stream) OVERRIDE {
error_ = true;
}
private:
- std::vector<uint8> received_data_;
+ int num_received_audio_frames_;
base::WaitableEvent data_event_;
- bool closed_;
bool error_;
DISALLOW_COPY_AND_ASSIGN(FakeAudioInputCallback);
@@ -94,8 +90,9 @@ class WriteToFileAudioSink : public AudioInputStream::AudioInputCallback {
// 2 bytes per sample, 2 channels, 10ms @ 48kHz, 10 seconds <=> 1920000 bytes.
static const size_t kMaxBufferSize = 2 * 2 * 480 * 100 * 10;
- explicit WriteToFileAudioSink(const char* file_name)
- : buffer_(0, kMaxBufferSize),
+ explicit WriteToFileAudioSink(const char* file_name, int bits_per_sample)
+ : bits_per_sample_(bits_per_sample),
+ buffer_(0, kMaxBufferSize),
bytes_to_write_(0) {
base::FilePath file_path;
EXPECT_TRUE(PathService::Get(base::DIR_EXE, &file_path));
@@ -103,6 +100,7 @@ class WriteToFileAudioSink : public AudioInputStream::AudioInputCallback {
binary_file_ = base::OpenFile(file_path, "wb");
DLOG_IF(ERROR, !binary_file_) << "Failed to open binary PCM data file.";
VLOG(0) << ">> Output file: " << file_path.value() << " has been created.";
+ VLOG(0) << "bits_per_sample_:" << bits_per_sample_;
}
virtual ~WriteToFileAudioSink() {
@@ -125,22 +123,28 @@ class WriteToFileAudioSink : public AudioInputStream::AudioInputCallback {
// AudioInputStream::AudioInputCallback implementation.
virtual void OnData(AudioInputStream* stream,
- const uint8* src,
- uint32 size,
+ const AudioBus* src,
uint32 hardware_delay_bytes,
double volume) {
+ EXPECT_EQ(bits_per_sample_, 16);
+ const int num_samples = src->frames() * src->channels();
+ scoped_ptr<int16> interleaved(new int16[num_samples]);
+ const int bytes_per_sample = sizeof(*interleaved);
+ src->ToInterleaved(src->frames(), bytes_per_sample, interleaved.get());
+
// Store the data in a temporary buffer to avoid making blocking
// fwrite() calls in the audio callback. The complete buffer will be
// written to file in the destructor.
- if (buffer_.Append(src, size)) {
+ const int size = bytes_per_sample * num_samples;
+ if (buffer_.Append((const uint8*)interleaved.get(), size)) {
bytes_to_write_ += size;
}
}
- virtual void OnClose(AudioInputStream* stream) {}
virtual void OnError(AudioInputStream* stream) {}
private:
+ int bits_per_sample_;
media::SeekableBuffer buffer_;
FILE* binary_file_;
size_t bytes_to_write_;
@@ -169,14 +173,13 @@ class AudioInputStreamWrapper {
explicit AudioInputStreamWrapper(AudioManager* audio_manager)
: com_init_(ScopedCOMInitializer::kMTA),
audio_man_(audio_manager),
- format_(AudioParameters::AUDIO_PCM_LOW_LATENCY),
- channel_layout_(CHANNEL_LAYOUT_STEREO),
- bits_per_sample_(16) {
- // Use native/mixing sample rate and 10ms frame size as default.
- sample_rate_ = static_cast<int>(
- WASAPIAudioInputStream::HardwareSampleRate(
- AudioManagerBase::kDefaultDeviceId));
- samples_per_packet_ = sample_rate_ / 100;
+ default_params_(
+ audio_manager->GetInputStreamParameters(
+ AudioManagerBase::kDefaultDeviceId)) {
+ EXPECT_EQ(format(), AudioParameters::AUDIO_PCM_LOW_LATENCY);
+ frames_per_buffer_ = default_params_.frames_per_buffer();
+ // We expect the default buffer size to be a 10ms buffer.
+ EXPECT_EQ(frames_per_buffer_, sample_rate() / 100);
}
~AudioInputStreamWrapper() {}
@@ -188,36 +191,35 @@ class AudioInputStreamWrapper {
// Creates AudioInputStream object using non-default parameters where the
// frame size is modified.
- AudioInputStream* Create(int samples_per_packet) {
- samples_per_packet_ = samples_per_packet;
+ AudioInputStream* Create(int frames_per_buffer) {
+ frames_per_buffer_ = frames_per_buffer;
return CreateInputStream();
}
- AudioParameters::Format format() const { return format_; }
+ AudioParameters::Format format() const { return default_params_.format(); }
int channels() const {
- return ChannelLayoutToChannelCount(channel_layout_);
+ return ChannelLayoutToChannelCount(default_params_.channel_layout());
}
- int bits_per_sample() const { return bits_per_sample_; }
- int sample_rate() const { return sample_rate_; }
- int samples_per_packet() const { return samples_per_packet_; }
+ int bits_per_sample() const { return default_params_.bits_per_sample(); }
+ int sample_rate() const { return default_params_.sample_rate(); }
+ int frames_per_buffer() const { return frames_per_buffer_; }
private:
AudioInputStream* CreateInputStream() {
AudioInputStream* ais = audio_man_->MakeAudioInputStream(
- AudioParameters(format_, channel_layout_, sample_rate_,
- bits_per_sample_, samples_per_packet_),
- AudioManagerBase::kDefaultDeviceId);
+ AudioParameters(format(), default_params_.channel_layout(),
+ default_params_.input_channels(),
+ sample_rate(), bits_per_sample(), frames_per_buffer_,
+ default_params_.effects()),
+ AudioManagerBase::kDefaultDeviceId);
EXPECT_TRUE(ais);
return ais;
}
ScopedCOMInitializer com_init_;
AudioManager* audio_man_;
- AudioParameters::Format format_;
- ChannelLayout channel_layout_;
- int bits_per_sample_;
- int sample_rate_;
- int samples_per_packet_;
+ const AudioParameters default_params_;
+ int frames_per_buffer_;
};
// Convenience method which creates a default AudioInputStream object.
@@ -278,10 +280,9 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamHardwareSampleRate) {
for (media::AudioDeviceNames::const_iterator it = device_names.begin();
it != device_names.end(); ++it) {
// Retrieve the hardware sample rate given a specified audio input device.
- // TODO(tommi): ensure that we don't have to cast here.
- int fs = static_cast<int>(WASAPIAudioInputStream::HardwareSampleRate(
- it->unique_id));
- EXPECT_GE(fs, 0);
+ AudioParameters params = WASAPIAudioInputStream::GetInputStreamParameters(
+ it->unique_id);
+ EXPECT_GE(params.sample_rate(), 0);
}
}
@@ -316,8 +317,6 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamOpenStartAndClose) {
EXPECT_TRUE(ais->Open());
MockAudioInputCallback sink;
ais->Start(&sink);
- EXPECT_CALL(sink, OnClose(ais.get()))
- .Times(1);
ais.Close();
}
@@ -332,8 +331,6 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamOpenStartStopAndClose) {
MockAudioInputCallback sink;
ais->Start(&sink);
ais->Stop();
- EXPECT_CALL(sink, OnClose(ais.get()))
- .Times(1);
ais.Close();
}
@@ -364,9 +361,6 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamMiscCallingSequences) {
EXPECT_FALSE(wais->started());
ais->Stop();
EXPECT_FALSE(wais->started());
-
- EXPECT_CALL(sink, OnClose(ais.get()))
- .Times(1);
ais.Close();
}
@@ -389,14 +383,13 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamTestPacketSizes) {
MockAudioInputCallback sink;
// Derive the expected size in bytes of each recorded packet.
- uint32 bytes_per_packet = aisw.channels() * aisw.samples_per_packet() *
+ uint32 bytes_per_packet = aisw.channels() * aisw.frames_per_buffer() *
(aisw.bits_per_sample() / 8);
// We use 10ms packets and will run the test until ten packets are received.
// All should contain valid packets of the same size and a valid delay
// estimate.
- EXPECT_CALL(sink, OnData(
- ais.get(), NotNull(), bytes_per_packet, Gt(bytes_per_packet), _))
+ EXPECT_CALL(sink, OnData(ais.get(), NotNull(), Gt(bytes_per_packet), _))
.Times(AtLeast(10))
.WillRepeatedly(CheckCountAndPostQuitTask(&count, 10, &loop));
ais->Start(&sink);
@@ -404,54 +397,44 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamTestPacketSizes) {
ais->Stop();
// Store current packet size (to be used in the subsequent tests).
- int samples_per_packet_10ms = aisw.samples_per_packet();
+ int frames_per_buffer_10ms = aisw.frames_per_buffer();
- EXPECT_CALL(sink, OnClose(ais.get()))
- .Times(1);
ais.Close();
// 20 ms packet size.
count = 0;
- ais.Reset(aisw.Create(2 * samples_per_packet_10ms));
+ ais.Reset(aisw.Create(2 * frames_per_buffer_10ms));
EXPECT_TRUE(ais->Open());
- bytes_per_packet = aisw.channels() * aisw.samples_per_packet() *
+ bytes_per_packet = aisw.channels() * aisw.frames_per_buffer() *
(aisw.bits_per_sample() / 8);
- EXPECT_CALL(sink, OnData(
- ais.get(), NotNull(), bytes_per_packet, Gt(bytes_per_packet), _))
+ EXPECT_CALL(sink, OnData(ais.get(), NotNull(), Gt(bytes_per_packet), _))
.Times(AtLeast(10))
.WillRepeatedly(CheckCountAndPostQuitTask(&count, 10, &loop));
ais->Start(&sink);
loop.Run();
ais->Stop();
-
- EXPECT_CALL(sink, OnClose(ais.get()))
- .Times(1);
ais.Close();
// 5 ms packet size.
count = 0;
- ais.Reset(aisw.Create(samples_per_packet_10ms / 2));
+ ais.Reset(aisw.Create(frames_per_buffer_10ms / 2));
EXPECT_TRUE(ais->Open());
- bytes_per_packet = aisw.channels() * aisw.samples_per_packet() *
+ bytes_per_packet = aisw.channels() * aisw.frames_per_buffer() *
(aisw.bits_per_sample() / 8);
- EXPECT_CALL(sink, OnData(
- ais.get(), NotNull(), bytes_per_packet, Gt(bytes_per_packet), _))
+ EXPECT_CALL(sink, OnData(ais.get(), NotNull(), Gt(bytes_per_packet), _))
.Times(AtLeast(10))
.WillRepeatedly(CheckCountAndPostQuitTask(&count, 10, &loop));
ais->Start(&sink);
loop.Run();
ais->Stop();
-
- EXPECT_CALL(sink, OnClose(ais.get()))
- .Times(1);
ais.Close();
}
-// Test that we can capture loopback stream.
+// Test that we can capture a stream in loopback.
TEST(WinAudioInputTest, WASAPIAudioInputStreamLoopback) {
scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
if (!audio_manager->HasAudioOutputDevices() || !CoreAudioUtil::IsSupported())
@@ -459,6 +442,7 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamLoopback) {
AudioParameters params = audio_manager->GetInputStreamParameters(
AudioManagerBase::kLoopbackInputDeviceId);
+ EXPECT_EQ(params.effects(), 0);
AudioParameters output_params =
audio_manager->GetOutputStreamParameters(std::string());
@@ -475,8 +459,7 @@ TEST(WinAudioInputTest, WASAPIAudioInputStreamLoopback) {
sink.WaitForData();
stream.Close();
- EXPECT_FALSE(sink.received_data().empty());
- EXPECT_TRUE(sink.closed());
+ EXPECT_GT(sink.num_received_audio_frames(), 0);
EXPECT_FALSE(sink.error());
}
@@ -501,7 +484,7 @@ TEST(WinAudioInputTest, DISABLED_WASAPIAudioInputStreamRecordToFile) {
EXPECT_TRUE(ais->Open());
VLOG(0) << ">> Sample rate: " << aisw.sample_rate() << " [Hz]";
- WriteToFileAudioSink file_sink(file_name);
+ WriteToFileAudioSink file_sink(file_name, aisw.bits_per_sample());
VLOG(0) << ">> Speak into the default microphone while recording.";
ais->Start(&file_sink);
base::PlatformThread::Sleep(TestTimeouts::action_timeout());
diff --git a/chromium/media/audio/win/audio_low_latency_output_win.cc b/chromium/media/audio/win/audio_low_latency_output_win.cc
index a10e67a46cb..6aad434f6e6 100644
--- a/chromium/media/audio/win/audio_low_latency_output_win.cc
+++ b/chromium/media/audio/win/audio_low_latency_output_win.cc
@@ -25,19 +25,6 @@ using base::win::ScopedCoMem;
namespace media {
-// Compare two sets of audio parameters and return true if they are equal.
-// Note that bits_per_sample() is excluded from this comparison since Core
-// Audio can deal with most bit depths. As an example, if the native/mixing
-// bit depth is 32 bits (default), opening at 16 or 24 still works fine and
-// the audio engine will do the required conversion for us. Channel count is
-// excluded since Open() will fail anyways and it doesn't impact buffering.
-static bool CompareAudioParametersNoBitDepthOrChannels(
- const media::AudioParameters& a, const media::AudioParameters& b) {
- return (a.format() == b.format() &&
- a.sample_rate() == b.sample_rate() &&
- a.frames_per_buffer() == b.frames_per_buffer());
-}
-
// static
AUDCLNT_SHAREMODE WASAPIAudioOutputStream::GetShareMode() {
const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
@@ -73,7 +60,6 @@ WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
manager_(manager),
format_(),
opened_(false),
- audio_parameters_are_valid_(false),
volume_(1.0),
packet_size_frames_(0),
packet_size_bytes_(0),
@@ -89,23 +75,6 @@ WASAPIAudioOutputStream::WASAPIAudioOutputStream(AudioManagerWin* manager,
VLOG_IF(1, share_mode_ == AUDCLNT_SHAREMODE_EXCLUSIVE)
<< "Core Audio (WASAPI) EXCLUSIVE MODE is enabled.";
- if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
- // Verify that the input audio parameters are identical (bit depth and
- // channel count are excluded) to the preferred (native) audio parameters.
- // Open() will fail if this is not the case.
- AudioParameters preferred_params;
- HRESULT hr = device_id_.empty() ?
- CoreAudioUtil::GetPreferredAudioParameters(eRender, device_role,
- &preferred_params) :
- CoreAudioUtil::GetPreferredAudioParameters(device_id_,
- &preferred_params);
- audio_parameters_are_valid_ = SUCCEEDED(hr) &&
- CompareAudioParametersNoBitDepthOrChannels(params, preferred_params);
- LOG_IF(WARNING, !audio_parameters_are_valid_)
- << "Input and preferred parameters are not identical. "
- << "Device id: " << device_id_;
- }
-
// Load the Avrt DLL if not already loaded. Required to support MMCSS.
bool avrt_init = avrt::Initialize();
DCHECK(avrt_init) << "Failed to load the avrt.dll";
@@ -159,18 +128,10 @@ bool WASAPIAudioOutputStream::Open() {
if (opened_)
return true;
- // Audio parameters must be identical to the preferred set of parameters
- // if shared mode (default) is utilized.
- if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
- if (!audio_parameters_are_valid_) {
- LOG(ERROR) << "Audio parameters are not valid.";
- return false;
- }
- }
-
// Create an IAudioClient interface for the default rendering IMMDevice.
ScopedComPtr<IAudioClient> audio_client;
- if (device_id_.empty()) {
+ if (device_id_.empty() ||
+ CoreAudioUtil::DeviceIsDefault(eRender, device_role_, device_id_)) {
audio_client = CoreAudioUtil::CreateDefaultClient(eRender, device_role_);
} else {
ScopedComPtr<IMMDevice> device(CoreAudioUtil::CreateDevice(device_id_));
@@ -186,6 +147,7 @@ bool WASAPIAudioOutputStream::Open() {
if (!CoreAudioUtil::IsFormatSupported(audio_client,
share_mode_,
&format_)) {
+ LOG(ERROR) << "Audio parameters are not supported.";
return false;
}
@@ -201,10 +163,13 @@ bool WASAPIAudioOutputStream::Open() {
// We know from experience that the best possible callback sequence is
// achieved when the packet size (given by the native device period)
- // is an even multiple of the endpoint buffer size.
+ // is an even divisor of the endpoint buffer size.
// Examples: 48kHz => 960 % 480, 44.1kHz => 896 % 448 or 882 % 441.
if (endpoint_buffer_size_frames_ % packet_size_frames_ != 0) {
- LOG(ERROR) << "Bailing out due to non-perfect timing.";
+ LOG(ERROR)
+ << "Bailing out due to non-perfect timing. Buffer size of "
+ << packet_size_frames_ << " is not an even divisor of "
+ << endpoint_buffer_size_frames_;
return false;
}
} else {
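The timing check above rejects configurations where the packet size does not divide the endpoint buffer exactly; when it does, every endpoint refill corresponds to a whole number of render packets. A standalone illustration of the condition (not the WASAPI code path itself):

#include <cassert>

// True when the packet size divides the endpoint buffer exactly, which is the
// condition the shared-mode Open() path above requires.
bool PacketDividesEndpointBuffer(int endpoint_buffer_size_frames,
                                 int packet_size_frames) {
  return endpoint_buffer_size_frames % packet_size_frames == 0;
}

int main() {
  assert(PacketDividesEndpointBuffer(960, 480));   // 48 kHz, 10 ms packets.
  assert(PacketDividesEndpointBuffer(896, 448));   // 44.1 kHz example above.
  assert(!PacketDividesEndpointBuffer(960, 441));  // Mismatched sizes bail out.
  return 0;
}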
@@ -287,8 +252,7 @@ void WASAPIAudioOutputStream::Start(AudioSourceCallback* callback) {
// Start streaming data between the endpoint buffer and the audio engine.
HRESULT hr = audio_client_->Start();
if (FAILED(hr)) {
- LOG_GETLASTERROR(ERROR)
- << "Failed to start output streaming: " << std::hex << hr;
+ PLOG(ERROR) << "Failed to start output streaming: " << std::hex << hr;
StopThread();
callback->OnError(this);
}
@@ -303,8 +267,7 @@ void WASAPIAudioOutputStream::Stop() {
// Stop output audio streaming.
HRESULT hr = audio_client_->Stop();
if (FAILED(hr)) {
- LOG_GETLASTERROR(ERROR)
- << "Failed to stop output streaming: " << std::hex << hr;
+ PLOG(ERROR) << "Failed to stop output streaming: " << std::hex << hr;
source_->OnError(this);
}
@@ -315,8 +278,7 @@ void WASAPIAudioOutputStream::Stop() {
// Flush all pending data and reset the audio clock stream position to 0.
hr = audio_client_->Reset();
if (FAILED(hr)) {
- LOG_GETLASTERROR(ERROR)
- << "Failed to reset streaming: " << std::hex << hr;
+ PLOG(ERROR) << "Failed to reset streaming: " << std::hex << hr;
callback->OnError(this);
}
diff --git a/chromium/media/audio/win/audio_low_latency_output_win.h b/chromium/media/audio/win/audio_low_latency_output_win.h
index 2baf6f1ac9a..c118947d94f 100644
--- a/chromium/media/audio/win/audio_low_latency_output_win.h
+++ b/chromium/media/audio/win/audio_low_latency_output_win.h
@@ -190,11 +190,6 @@ class MEDIA_EXPORT WASAPIAudioOutputStream :
// Set to true when stream is successfully opened.
bool opened_;
- // We check if the input audio parameters are identical (bit depth is
- // excluded) to the preferred (native) audio parameters during construction.
- // Open() will fail if |audio_parameters_are_valid_| is false.
- bool audio_parameters_are_valid_;
-
// Volume level from 0 to 1.
float volume_;
diff --git a/chromium/media/audio/win/audio_low_latency_output_win_unittest.cc b/chromium/media/audio/win/audio_low_latency_output_win_unittest.cc
index 5fda4b14509..ed03d2b714d 100644
--- a/chromium/media/audio/win/audio_low_latency_output_win_unittest.cc
+++ b/chromium/media/audio/win/audio_low_latency_output_win_unittest.cc
@@ -16,6 +16,7 @@
#include "base/win/scoped_com_initializer.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_manager.h"
+#include "media/audio/mock_audio_source_callback.h"
#include "media/audio/win/audio_low_latency_output_win.h"
#include "media/audio/win/core_audio_util_win.h"
#include "media/base/decoder_buffer.h"
@@ -60,16 +61,6 @@ ACTION_P(QuitLoop, loop) {
loop->PostTask(FROM_HERE, base::MessageLoop::QuitClosure());
}
-class MockAudioSourceCallback : public AudioOutputStream::AudioSourceCallback {
- public:
- MOCK_METHOD2(OnMoreData, int(AudioBus* audio_bus,
- AudioBuffersState buffers_state));
- MOCK_METHOD3(OnMoreIOData, int(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state));
- MOCK_METHOD1(OnError, void(AudioOutputStream* stream));
-};
-
// This audio source implementation should be used for manual tests only since
// it takes about 20 seconds to play out a file.
class ReadFromFileAudioSource : public AudioOutputStream::AudioSourceCallback {
@@ -139,13 +130,6 @@ class ReadFromFileAudioSource : public AudioOutputStream::AudioSourceCallback {
return frames;
}
- virtual int OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state) OVERRIDE {
- NOTREACHED();
- return 0;
- }
-
virtual void OnError(AudioOutputStream* stream) {}
int file_size() { return file_->data_size(); }
@@ -233,7 +217,7 @@ class AudioOutputStreamWrapper {
AudioOutputStream* aos = audio_man_->MakeAudioOutputStream(
AudioParameters(format_, channel_layout_, sample_rate_,
bits_per_sample_, samples_per_packet_),
- std::string(), std::string());
+ std::string());
EXPECT_TRUE(aos);
return aos;
}
@@ -432,27 +416,6 @@ TEST(WASAPIAudioOutputStreamTest, ValidPacketSize) {
aos->Close();
}
-// Use a non-preferred packet size and verify that Open() fails.
-TEST(WASAPIAudioOutputStreamTest, InvalidPacketSize) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
- if (!CanRunAudioTests(audio_manager.get()))
- return;
-
- if (ExclusiveModeIsEnabled())
- return;
-
- AudioParameters preferred_params;
- EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetPreferredAudioParameters(
- eRender, eConsole, &preferred_params)));
- int too_large_packet_size = 2 * preferred_params.frames_per_buffer();
-
- AudioOutputStreamWrapper aosw(audio_manager.get());
- AudioOutputStream* aos = aosw.Create(too_large_packet_size);
- EXPECT_FALSE(aos->Open());
-
- aos->Close();
-}
-
// This test is intended for manual tests and should only be enabled
// when it is required to play out data from a local PCM file.
// By default, GTest will print out YOU HAVE 1 DISABLED TEST.
diff --git a/chromium/media/audio/win/audio_manager_win.cc b/chromium/media/audio/win/audio_manager_win.cc
index 242813a8c65..eb05ca0547b 100644
--- a/chromium/media/audio/win/audio_manager_win.cc
+++ b/chromium/media/audio/win/audio_manager_win.cc
@@ -26,12 +26,11 @@
#include "media/audio/win/audio_low_latency_input_win.h"
#include "media/audio/win/audio_low_latency_output_win.h"
#include "media/audio/win/audio_manager_win.h"
-#include "media/audio/win/audio_unified_win.h"
#include "media/audio/win/core_audio_util_win.h"
#include "media/audio/win/device_enumeration_win.h"
#include "media/audio/win/wavein_input_win.h"
#include "media/audio/win/waveout_output_win.h"
-#include "media/base/bind_to_loop.h"
+#include "media/base/bind_to_current_loop.h"
#include "media/base/channel_layout.h"
#include "media/base/limits.h"
#include "media/base/media_switches.h"
@@ -128,28 +127,30 @@ static int NumberOfWaveOutBuffers() {
}
AudioManagerWin::AudioManagerWin(AudioLogFactory* audio_log_factory)
- : AudioManagerBase(audio_log_factory) {
- if (!CoreAudioUtil::IsSupported()) {
- // Use the Wave API for device enumeration if XP or lower.
- enumeration_type_ = kWaveEnumeration;
- } else {
- // Use the MMDevice API for device enumeration if Vista or higher.
- enumeration_type_ = kMMDeviceEnumeration;
- }
-
+ : AudioManagerBase(audio_log_factory),
+ // |CoreAudioUtil::IsSupported()| uses static variables to avoid doing
+ // multiple initializations, which is not thread safe. So we call it
+ // explicitly here, before we kick off the audio thread or do any other
+ // work.
+ enumeration_type_(CoreAudioUtil::IsSupported() ?
+ kMMDeviceEnumeration : kWaveEnumeration) {
SetMaxOutputStreamsAllowed(kMaxOutputStreams);
+ // WARNING: This is executed on the UI loop; do not add any code here that
+ // loads libraries or attempts to call out into the OS. Instead, add such
+ // code to the InitializeOnAudioThread() method below.
+
// Task must be posted last to avoid races from handing out "this" to the
// audio thread.
- GetMessageLoop()->PostTask(FROM_HERE, base::Bind(
- &AudioManagerWin::CreateDeviceListener, base::Unretained(this)));
+ GetTaskRunner()->PostTask(FROM_HERE, base::Bind(
+ &AudioManagerWin::InitializeOnAudioThread, base::Unretained(this)));
}
AudioManagerWin::~AudioManagerWin() {
// It's safe to post a task here since Shutdown() will wait for all tasks to
// complete before returning.
- GetMessageLoop()->PostTask(FROM_HERE, base::Bind(
- &AudioManagerWin::DestroyDeviceListener, base::Unretained(this)));
+ GetTaskRunner()->PostTask(FROM_HERE, base::Bind(
+ &AudioManagerWin::ShutdownOnAudioThread, base::Unretained(this)));
Shutdown();
}
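The constructor change above probes CoreAudioUtil::IsSupported() exactly once on the constructing (UI) thread, because its internal static cache is not thread safe, and then defers all expensive work to the audio thread via a posted task. A minimal sketch of that construction-order pattern, using plain std::thread as a hypothetical stand-in for the audio thread's task runner:

#include <thread>

// Stand-in for a manager that must not race on a lazily cached capability
// probe and must not do expensive work on the constructing thread.
class ManagerSketch {
 public:
  explicit ManagerSketch(bool (*probe_capability)())
      // Resolve the non-thread-safe probe once, before any other thread
      // could call it concurrently.
      : capability_supported_(probe_capability()) {
    // Heavy OS work (loading libraries, registering device listeners) is
    // deferred to the worker thread instead of the constructing thread.
    worker_ = std::thread([this] { InitializeOnWorkerThread(); });
  }
  ~ManagerSketch() { worker_.join(); }

 private:
  void InitializeOnWorkerThread() { /* expensive, thread-affine setup */ }

  const bool capability_supported_;
  std::thread worker_;
};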
@@ -161,18 +162,20 @@ bool AudioManagerWin::HasAudioInputDevices() {
return (::waveInGetNumDevs() != 0);
}
-void AudioManagerWin::CreateDeviceListener() {
- // AudioDeviceListenerWin must be initialized on a COM thread and should only
- // be used if WASAPI / Core Audio is supported.
- if (CoreAudioUtil::IsSupported()) {
- output_device_listener_.reset(new AudioDeviceListenerWin(BindToLoop(
- GetMessageLoop(), base::Bind(
- &AudioManagerWin::NotifyAllOutputDeviceChangeListeners,
- base::Unretained(this)))));
+void AudioManagerWin::InitializeOnAudioThread() {
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
+
+ if (core_audio_supported()) {
+ // AudioDeviceListenerWin must be initialized on a COM thread and should
+ // only be used if WASAPI / Core Audio is supported.
+ output_device_listener_.reset(new AudioDeviceListenerWin(BindToCurrentLoop(
+ base::Bind(&AudioManagerWin::NotifyAllOutputDeviceChangeListeners,
+ base::Unretained(this)))));
}
}
-void AudioManagerWin::DestroyDeviceListener() {
+void AudioManagerWin::ShutdownOnAudioThread() {
+ DCHECK(GetTaskRunner()->BelongsToCurrentThread());
output_device_listener_.reset();
}
@@ -243,7 +246,7 @@ base::string16 AudioManagerWin::GetAudioInputDeviceModel() {
void AudioManagerWin::ShowAudioInputSettings() {
std::wstring program;
std::string argument;
- if (!CoreAudioUtil::IsSupported()) {
+ if (!core_audio_supported()) {
program = L"sndvol32.exe";
argument = "-R";
} else {
@@ -263,7 +266,6 @@ void AudioManagerWin::GetAudioDeviceNamesImpl(
bool input,
AudioDeviceNames* device_names) {
DCHECK(device_names->empty());
- DCHECK(enumeration_type() != kUninitializedEnumeration);
// Enumerate all active audio-endpoint capture devices.
if (enumeration_type() == kWaveEnumeration) {
// Utilize the Wave API for Windows XP.
@@ -299,26 +301,30 @@ void AudioManagerWin::GetAudioOutputDeviceNames(
AudioParameters AudioManagerWin::GetInputStreamParameters(
const std::string& device_id) {
- int sample_rate = 48000;
- ChannelLayout channel_layout = CHANNEL_LAYOUT_STEREO;
- if (CoreAudioUtil::IsSupported()) {
- int hw_sample_rate = WASAPIAudioInputStream::HardwareSampleRate(device_id);
- if (hw_sample_rate)
- sample_rate = hw_sample_rate;
- channel_layout =
- WASAPIAudioInputStream::HardwareChannelCount(device_id) == 1 ?
- CHANNEL_LAYOUT_MONO : CHANNEL_LAYOUT_STEREO;
+ AudioParameters parameters;
+ if (!core_audio_supported()) {
+ // Windows Wave implementation is being used.
+ parameters = AudioParameters(
+ AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO, 0, 48000,
+ 16, kFallbackBufferSize, AudioParameters::NO_EFFECTS);
+ } else {
+ parameters = WASAPIAudioInputStream::GetInputStreamParameters(device_id);
}
- // TODO(Henrika): improve the default buffer size value for input stream.
- return AudioParameters(
- AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout,
- sample_rate, 16, kFallbackBufferSize);
+ int user_buffer_size = GetUserBufferSize();
+ if (user_buffer_size) {
+ parameters.Reset(parameters.format(), parameters.channel_layout(),
+ parameters.channels(), parameters.input_channels(),
+ parameters.sample_rate(), parameters.bits_per_sample(),
+ user_buffer_size);
+ }
+
+ return parameters;
}
std::string AudioManagerWin::GetAssociatedOutputDeviceID(
const std::string& input_device_id) {
- if (!CoreAudioUtil::IsSupported()) {
+ if (!core_audio_supported()) {
NOTIMPLEMENTED()
<< "GetAssociatedOutputDeviceID is not supported on this OS";
return std::string();
@@ -348,13 +354,12 @@ AudioOutputStream* AudioManagerWin::MakeLinearOutputStream(
// - WASAPIAudioOutputStream: Based on Core Audio (WASAPI) API.
AudioOutputStream* AudioManagerWin::MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) {
+ const std::string& device_id) {
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
if (params.channels() > kWinMaxChannels)
return NULL;
- if (!CoreAudioUtil::IsSupported()) {
+ if (!core_audio_supported()) {
// Fall back to Windows Wave implementation on Windows XP or lower.
DLOG_IF(ERROR, !device_id.empty() &&
device_id != AudioManagerBase::kDefaultDeviceId)
@@ -364,22 +369,14 @@ AudioOutputStream* AudioManagerWin::MakeLowLatencyOutputStream(
this, params, NumberOfWaveOutBuffers(), WAVE_MAPPER);
}
- // TODO(rtoy): support more than stereo input.
- if (params.input_channels() > 0) {
- DVLOG(1) << "WASAPIUnifiedStream is created.";
- DLOG_IF(ERROR, !device_id.empty() &&
- device_id != AudioManagerBase::kDefaultDeviceId)
- << "Opening by device id not supported by WASAPIUnifiedStream";
- return new WASAPIUnifiedStream(this, params, input_device_id);
- }
-
// Pass an empty string to indicate that we want the default device
// since we consistently only check for an empty string in
// WASAPIAudioOutputStream.
return new WASAPIAudioOutputStream(this,
device_id == AudioManagerBase::kDefaultDeviceId ?
std::string() : device_id,
- params, eConsole);
+ params,
+ params.effects() & AudioParameters::DUCKING ? eCommunications : eConsole);
}
// Factory for the implementations of AudioInputStream for AUDIO_PCM_LINEAR
@@ -395,8 +392,9 @@ AudioInputStream* AudioManagerWin::MakeLinearInputStream(
AudioInputStream* AudioManagerWin::MakeLowLatencyInputStream(
const AudioParameters& params, const std::string& device_id) {
DCHECK_EQ(AudioParameters::AUDIO_PCM_LOW_LATENCY, params.format());
+ DVLOG(1) << "MakeLowLatencyInputStream: " << device_id;
AudioInputStream* stream = NULL;
- if (!CoreAudioUtil::IsSupported()) {
+ if (!core_audio_supported()) {
// Fall back to Windows Wave implementation on Windows XP or lower.
DVLOG(1) << "Using WaveIn since WASAPI requires at least Vista.";
stream = CreatePCMWaveInAudioInputStream(params, device_id);
@@ -408,7 +406,7 @@ AudioInputStream* AudioManagerWin::MakeLowLatencyInputStream(
}
std::string AudioManagerWin::GetDefaultOutputDeviceID() {
- if (!CoreAudioUtil::IsSupported())
+ if (!core_audio_supported())
return std::string();
return CoreAudioUtil::GetDefaultOutputDeviceID();
}
@@ -416,8 +414,7 @@ std::string AudioManagerWin::GetDefaultOutputDeviceID() {
AudioParameters AudioManagerWin::GetPreferredOutputStreamParameters(
const std::string& output_device_id,
const AudioParameters& input_params) {
- const bool core_audio_supported = CoreAudioUtil::IsSupported();
- DLOG_IF(ERROR, !core_audio_supported && !output_device_id.empty())
+ DLOG_IF(ERROR, !core_audio_supported() && !output_device_id.empty())
<< "CoreAudio is required to open non-default devices.";
const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
@@ -426,8 +423,9 @@ AudioParameters AudioManagerWin::GetPreferredOutputStreamParameters(
int buffer_size = kFallbackBufferSize;
int bits_per_sample = 16;
int input_channels = 0;
- bool use_input_params = !core_audio_supported;
- if (core_audio_supported) {
+ int effects = AudioParameters::NO_EFFECTS;
+ bool use_input_params = !core_audio_supported();
+ if (core_audio_supported()) {
if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio)) {
// TODO(rtoy): tune these values for best possible WebAudio
// performance. WebRTC works well at 48kHz and a buffer size of 480
@@ -449,7 +447,13 @@ AudioParameters AudioManagerWin::GetPreferredOutputStreamParameters(
buffer_size = params.frames_per_buffer();
channel_layout = params.channel_layout();
sample_rate = params.sample_rate();
+ effects = params.effects();
} else {
+ // TODO(tommi): This should really never happen, and I'm not sure that
+ // setting use_input_params is the right thing to do since WASAPI is
+ // definitely supported (see core_audio_supported() above) and
+ // |use_input_params| is only for cases when it isn't supported.
+ DLOG(ERROR) << "GetPreferredAudioParameters failed: " << std::hex << hr;
use_input_params = true;
}
}
@@ -459,7 +463,7 @@ AudioParameters AudioManagerWin::GetPreferredOutputStreamParameters(
// If the user has enabled checking supported channel layouts or we don't
// have a valid channel layout yet, try to use the input layout. See bugs
// http://crbug.com/259165 and http://crbug.com/311906 for more details.
- if (core_audio_supported &&
+ if (core_audio_supported() &&
(cmd_line->HasSwitch(switches::kTrySupportedChannelLayouts) ||
channel_layout == CHANNEL_LAYOUT_UNSUPPORTED)) {
// Check if it is possible to open up at the specified input channel
@@ -484,6 +488,7 @@ AudioParameters AudioManagerWin::GetPreferredOutputStreamParameters(
}
}
input_channels = input_params.input_channels();
+ effects |= input_params.effects();
if (use_input_params) {
// If WASAPI isn't supported we'll fallback to WaveOut, which will take
// care of resampling and bits per sample changes. By setting these
@@ -503,7 +508,7 @@ AudioParameters AudioManagerWin::GetPreferredOutputStreamParameters(
return AudioParameters(
AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout, input_channels,
- sample_rate, bits_per_sample, buffer_size, AudioParameters::NO_EFFECTS);
+ sample_rate, bits_per_sample, buffer_size, effects);
}
AudioInputStream* AudioManagerWin::CreatePCMWaveInAudioInputStream(
diff --git a/chromium/media/audio/win/audio_manager_win.h b/chromium/media/audio/win/audio_manager_win.h
index 01044da40a0..d8d51844c05 100644
--- a/chromium/media/audio/win/audio_manager_win.h
+++ b/chromium/media/audio/win/audio_manager_win.h
@@ -39,8 +39,7 @@ class MEDIA_EXPORT AudioManagerWin : public AudioManagerBase {
const AudioParameters& params) OVERRIDE;
virtual AudioOutputStream* MakeLowLatencyOutputStream(
const AudioParameters& params,
- const std::string& device_id,
- const std::string& input_device_id) OVERRIDE;
+ const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLinearInputStream(
const AudioParameters& params, const std::string& device_id) OVERRIDE;
virtual AudioInputStream* MakeLowLatencyInputStream(
@@ -56,7 +55,6 @@ class MEDIA_EXPORT AudioManagerWin : public AudioManagerBase {
private:
enum EnumerationType {
- kUninitializedEnumeration = 0,
kMMDeviceEnumeration,
kWaveEnumeration,
};
@@ -70,6 +68,10 @@ class MEDIA_EXPORT AudioManagerWin : public AudioManagerBase {
enumeration_type_ = type;
}
+ inline bool core_audio_supported() const {
+ return enumeration_type_ == kMMDeviceEnumeration;
+ }
+
// Returns a PCMWaveInAudioInputStream instance or NULL on failure.
// This method converts MMDevice-style device ID to WaveIn-style device ID if
// necessary.
@@ -79,9 +81,10 @@ class MEDIA_EXPORT AudioManagerWin : public AudioManagerBase {
const AudioParameters& params,
const std::string& device_id);
- // Helper methods for constructing AudioDeviceListenerWin on the audio thread.
- void CreateDeviceListener();
- void DestroyDeviceListener();
+ // Helper methods for performing expensive initialization tasks on the audio
+ // thread instead of on the UI thread which AudioManager is constructed on.
+ void InitializeOnAudioThread();
+ void ShutdownOnAudioThread();
void GetAudioDeviceNamesImpl(bool input, AudioDeviceNames* device_names);
diff --git a/chromium/media/audio/win/audio_output_win_unittest.cc b/chromium/media/audio/win/audio_output_win_unittest.cc
index 2b8036d52a2..29026577ad8 100644
--- a/chromium/media/audio/win/audio_output_win_unittest.cc
+++ b/chromium/media/audio/win/audio_output_win_unittest.cc
@@ -15,6 +15,7 @@
#include "media/base/limits.h"
#include "media/audio/audio_io.h"
#include "media/audio/audio_manager.h"
+#include "media/audio/mock_audio_source_callback.h"
#include "media/audio/simple_sources.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -36,6 +37,11 @@ namespace media {
static const wchar_t kAudioFile1_16b_m_16K[]
= L"media\\test\\data\\sweep02_16b_mono_16KHz.raw";
+static int ClearData(AudioBus* audio_bus, AudioBuffersState buffers_state) {
+ audio_bus->Zero();
+ return audio_bus->frames();
+}
+
// This class allows us to find out if the callbacks are occurring as
// expected and if any error has been reported.
class TestSourceBasic : public AudioOutputStream::AudioSourceCallback {
@@ -52,12 +58,6 @@ class TestSourceBasic : public AudioOutputStream::AudioSourceCallback {
audio_bus->Zero();
return audio_bus->frames();
}
- virtual int OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state) {
- NOTREACHED();
- return 0;
- }
// AudioSourceCallback::OnError implementation:
virtual void OnError(AudioOutputStream* stream) {
++had_error_;
@@ -102,21 +102,6 @@ class TestSourceLaggy : public TestSourceBasic {
int lag_in_ms_;
};
-class MockAudioSource : public AudioOutputStream::AudioSourceCallback {
- public:
- MOCK_METHOD2(OnMoreData, int(AudioBus* audio_bus,
- AudioBuffersState buffers_state));
- MOCK_METHOD3(OnMoreIOData, int(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state));
- MOCK_METHOD1(OnError, void(AudioOutputStream* stream));
-
- static int ClearData(AudioBus* audio_bus, AudioBuffersState buffers_state) {
- audio_bus->Zero();
- return audio_bus->frames();
- }
-};
-
// Helper class to memory map an entire file. The mapping is read-only. Don't
// use for gigabyte-sized files. Attempts to write to this memory generate
// memory access violations.
@@ -184,7 +169,7 @@ TEST(WinAudioTest, PCMWaveStreamGetAndClose) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
8000, 16, 256),
- std::string(), std::string());
+ std::string());
ASSERT_TRUE(NULL != oas);
oas->Close();
}
@@ -200,29 +185,29 @@ TEST(WinAudioTest, SanityOnMakeParams) {
AudioParameters::Format fmt = AudioParameters::AUDIO_PCM_LINEAR;
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_UNSUPPORTED, 8000, 16, 256),
- std::string(), std::string()));
+ std::string()));
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_MONO, 1024 * 1024, 16, 256),
- std::string(), std::string()));
+ std::string()));
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_STEREO, 8000, 80, 256),
- std::string(), std::string()));
+ std::string()));
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_UNSUPPORTED, 8000, 16, 256),
- std::string(), std::string()));
+ std::string()));
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_STEREO, -8000, 16, 256),
- std::string(), std::string()));
+ std::string()));
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_MONO, 8000, 16, -100),
- std::string(), std::string()));
+ std::string()));
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_MONO, 8000, 16, 0),
- std::string(), std::string()));
+ std::string()));
EXPECT_TRUE(NULL == audio_man->MakeAudioOutputStream(
AudioParameters(fmt, CHANNEL_LAYOUT_MONO, 8000, 16,
media::limits::kMaxSamplesPerPacket + 1),
- std::string(), std::string()));
+ std::string()));
}
// Test that it can be opened and closed.
@@ -236,7 +221,7 @@ TEST(WinAudioTest, PCMWaveStreamOpenAndClose) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
8000, 16, 256),
- std::string(), std::string());
+ std::string());
ASSERT_TRUE(NULL != oas);
EXPECT_TRUE(oas->Open());
oas->Close();
@@ -253,7 +238,7 @@ TEST(WinAudioTest, PCMWaveStreamOpenLimit) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
8000, 16, 1024 * 1024 * 1024),
- std::string(), std::string());
+ std::string());
EXPECT_TRUE(NULL == oas);
if (oas)
oas->Close();
@@ -272,7 +257,7 @@ TEST(WinAudioTest, PCMWaveSlowSource) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
16000, 16, 256),
- std::string(), std::string());
+ std::string());
ASSERT_TRUE(NULL != oas);
TestSourceLaggy test_laggy(2, 90);
EXPECT_TRUE(oas->Open());
@@ -301,7 +286,7 @@ TEST(WinAudioTest, PCMWaveStreamPlaySlowLoop) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
AudioParameters::kAudioCDSampleRate, 16, samples_100_ms),
- std::string(), std::string());
+ std::string());
ASSERT_TRUE(NULL != oas);
SineWaveAudioSource source(1, 200.0, AudioParameters::kAudioCDSampleRate);
@@ -332,7 +317,7 @@ TEST(WinAudioTest, PCMWaveStreamPlay200HzTone44Kss) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
AudioParameters::kAudioCDSampleRate, 16, samples_100_ms),
- std::string(), std::string());
+ std::string());
ASSERT_TRUE(NULL != oas);
SineWaveAudioSource source(1, 200.0, AudioParameters::kAudioCDSampleRate);
@@ -361,7 +346,7 @@ TEST(WinAudioTest, PCMWaveStreamPlay200HzTone22Kss) {
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
AudioParameters::kAudioCDSampleRate / 2, 16,
samples_100_ms),
- std::string(), std::string());
+ std::string());
ASSERT_TRUE(NULL != oas);
SineWaveAudioSource source(1, 200.0, AudioParameters::kAudioCDSampleRate/2);
@@ -401,7 +386,7 @@ TEST(WinAudioTest, PushSourceFile16KHz) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
kSampleRate, 16, kSamples100ms),
- std::string(), std::string());
+ std::string());
ASSERT_TRUE(NULL != oas);
EXPECT_TRUE(oas->Open());
@@ -438,7 +423,7 @@ TEST(WinAudioTest, PCMWaveStreamPlayTwice200HzTone44Kss) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
AudioParameters::kAudioCDSampleRate, 16, samples_100_ms),
- std::string(), std::string());
+ std::string());
ASSERT_TRUE(NULL != oas);
SineWaveAudioSource source(1, 200.0, AudioParameters::kAudioCDSampleRate);
@@ -485,7 +470,7 @@ TEST(WinAudioTest, PCMWaveStreamPlay200HzToneLowLatency) {
AudioParameters(AudioParameters::AUDIO_PCM_LOW_LATENCY,
CHANNEL_LAYOUT_MONO, sample_rate,
16, n * samples_10_ms),
- std::string(), std::string());
+ std::string());
ASSERT_TRUE(NULL != oas);
SineWaveAudioSource source(1, 200, sample_rate);
@@ -519,10 +504,10 @@ TEST(WinAudioTest, PCMWaveStreamPendingBytes) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(
AudioParameters(AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_MONO,
AudioParameters::kAudioCDSampleRate, 16, samples_100_ms),
- std::string(), std::string());
+ std::string());
ASSERT_TRUE(NULL != oas);
- NiceMock<MockAudioSource> source;
+ NiceMock<MockAudioSourceCallback> source;
EXPECT_TRUE(oas->Open());
uint32 bytes_100_ms = samples_100_ms * 2;
@@ -537,18 +522,18 @@ TEST(WinAudioTest, PCMWaveStreamPendingBytes) {
EXPECT_CALL(source, OnMoreData(NotNull(),
Field(&AudioBuffersState::pending_bytes, 0)))
- .WillOnce(Invoke(MockAudioSource::ClearData));
+ .WillOnce(Invoke(ClearData));
// Note: If AudioManagerWin::NumberOfWaveOutBuffers() ever changes, or if this
// test is run on Vista, these expectations will fail.
EXPECT_CALL(source, OnMoreData(NotNull(),
Field(&AudioBuffersState::pending_bytes,
bytes_100_ms)))
- .WillOnce(Invoke(MockAudioSource::ClearData));
+ .WillOnce(Invoke(ClearData));
EXPECT_CALL(source, OnMoreData(NotNull(),
Field(&AudioBuffersState::pending_bytes,
2 * bytes_100_ms)))
- .WillOnce(Invoke(MockAudioSource::ClearData));
+ .WillOnce(Invoke(ClearData));
EXPECT_CALL(source, OnMoreData(NotNull(),
Field(&AudioBuffersState::pending_bytes,
2 * bytes_100_ms)))
@@ -606,7 +591,7 @@ class SyncSocketSource : public AudioOutputStream::AudioSourceCallback {
private:
base::SyncSocket* socket_;
int data_size_;
- scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> data_;
+ scoped_ptr<float, base::AlignedFreeDeleter> data_;
scoped_ptr<AudioBus> audio_bus_;
};
@@ -629,7 +614,7 @@ DWORD __stdcall SyncSocketThread(void* context) {
SyncThreadContext& ctx = *(reinterpret_cast<SyncThreadContext*>(context));
// Setup AudioBus wrapping data we'll pass over the sync socket.
- scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> data(static_cast<float*>(
+ scoped_ptr<float, base::AlignedFreeDeleter> data(static_cast<float*>(
base::AlignedAlloc(ctx.packet_size_bytes, AudioBus::kChannelAlignment)));
scoped_ptr<AudioBus> audio_bus = AudioBus::WrapMemory(
ctx.channels, ctx.frames, data.get());
@@ -673,7 +658,7 @@ TEST(WinAudioTest, SyncSocketBasic) {
AudioOutputStream* oas = audio_man->MakeAudioOutputStream(params,
- std::string(), std::string());
+ std::string());
ASSERT_TRUE(NULL != oas);
ASSERT_TRUE(oas->Open());
diff --git a/chromium/media/audio/win/audio_unified_win.cc b/chromium/media/audio/win/audio_unified_win.cc
deleted file mode 100644
index 901c8b897fa..00000000000
--- a/chromium/media/audio/win/audio_unified_win.cc
+++ /dev/null
@@ -1,984 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "media/audio/win/audio_unified_win.h"
-
-#include <Functiondiscoverykeys_devpkey.h>
-
-#include "base/debug/trace_event.h"
-#ifndef NDEBUG
-#include "base/file_util.h"
-#include "base/path_service.h"
-#endif
-#include "base/time/time.h"
-#include "base/win/scoped_com_initializer.h"
-#include "media/audio/win/audio_manager_win.h"
-#include "media/audio/win/avrt_wrapper_win.h"
-#include "media/audio/win/core_audio_util_win.h"
-
-using base::win::ScopedComPtr;
-using base::win::ScopedCOMInitializer;
-using base::win::ScopedCoMem;
-
-// Smoothing factor in exponential smoothing filter where 0 < alpha < 1.
-// Larger values of alpha reduce the level of smoothing.
-// See http://en.wikipedia.org/wiki/Exponential_smoothing for details.
-static const double kAlpha = 0.1;
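The deleted comment above refers to the standard exponential smoothing recurrence; for reference, one smoothing step looks like this (generic sketch, not the removed class):

// One step of exponential smoothing: with 0 < alpha < 1, a larger alpha
// weights the newest sample more heavily, i.e. produces less smoothing.
double ExponentialSmooth(double previous_average, double new_sample,
                         double alpha) {
  return alpha * new_sample + (1.0 - alpha) * previous_average;
}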
-
-// Compute a rate compensation which always attracts us back to a specified
-// target level over a period of |kCorrectionTimeSeconds|.
-static const double kCorrectionTimeSeconds = 0.1;
-
-#ifndef NDEBUG
-// Max number of columns in the output text file |kUnifiedAudioDebugFileName|.
-// See LogElementNames enumerator for details on what each column represents.
-static const size_t kMaxNumSampleTypes = 4;
-
-static const size_t kMaxNumParams = 2;
-
-// Max number of rows in the output file |kUnifiedAudioDebugFileName|.
-// Each row corresponds to one set of sample values for (approximately) the
-// same time instant (stored in the first column).
-static const size_t kMaxFileSamples = 10000;
-
-// Name of output debug file used for off-line analysis of measurements which
-// can be utilized for performance tuning of this class.
-static const char kUnifiedAudioDebugFileName[] = "unified_win_debug.txt";
-
-// Name of output debug file used for off-line analysis of measurements.
-// This file will contain a list of audio parameters.
-static const char kUnifiedAudioParamsFileName[] = "unified_win_params.txt";
-#endif
-
-// Use the acquired IAudioClock interface to derive a time stamp of the audio
-// sample which is currently playing through the speakers.
-static double SpeakerStreamPosInMilliseconds(IAudioClock* clock) {
- UINT64 device_frequency = 0, position = 0;
- if (FAILED(clock->GetFrequency(&device_frequency)) ||
- FAILED(clock->GetPosition(&position, NULL))) {
- return 0.0;
- }
- return base::Time::kMillisecondsPerSecond *
- (static_cast<double>(position) / device_frequency);
-}
-
-// Get a time stamp in milliseconds given the number of audio frames in |num_frames|
-// using the current sample rate |fs| as scale factor.
-// Example: |num_frames| = 960 and |fs| = 48000 => 20 [ms].
-static double CurrentStreamPosInMilliseconds(UINT64 num_frames, DWORD fs) {
- return base::Time::kMillisecondsPerSecond *
- (static_cast<double>(num_frames) / fs);
-}
-
-// Convert a timestamp in milliseconds to byte units given the audio format
-// in |format|.
-// Example: |ts_milliseconds| equals 10, sample rate is 48000 and frame size
-// is 4 bytes per audio frame => 480 * 4 = 1920 [bytes].
-static int MillisecondsToBytes(double ts_milliseconds,
- const WAVEFORMATPCMEX& format) {
- double seconds = ts_milliseconds / base::Time::kMillisecondsPerSecond;
- return static_cast<int>(seconds * format.Format.nSamplesPerSec *
- format.Format.nBlockAlign + 0.5);
-}
-
-// Convert frame count to milliseconds given the audio format in |format|.
-static double FrameCountToMilliseconds(int num_frames,
- const WAVEFORMATPCMEX& format) {
- return (base::Time::kMillisecondsPerSecond * num_frames) /
- static_cast<double>(format.Format.nSamplesPerSec);
-}
-
-namespace media {
-
-WASAPIUnifiedStream::WASAPIUnifiedStream(AudioManagerWin* manager,
- const AudioParameters& params,
- const std::string& input_device_id)
- : creating_thread_id_(base::PlatformThread::CurrentId()),
- manager_(manager),
- params_(params),
- input_channels_(params.input_channels()),
- output_channels_(params.channels()),
- input_device_id_(input_device_id),
- share_mode_(CoreAudioUtil::GetShareMode()),
- opened_(false),
- volume_(1.0),
- output_buffer_size_frames_(0),
- input_buffer_size_frames_(0),
- endpoint_render_buffer_size_frames_(0),
- endpoint_capture_buffer_size_frames_(0),
- num_written_frames_(0),
- total_delay_ms_(0.0),
- total_delay_bytes_(0),
- source_(NULL),
- input_callback_received_(false),
- io_sample_rate_ratio_(1),
- target_fifo_frames_(0),
- average_delta_(0),
- fifo_rate_compensation_(1),
- update_output_delay_(false),
- capture_delay_ms_(0) {
- TRACE_EVENT0("audio", "WASAPIUnifiedStream::WASAPIUnifiedStream");
- VLOG(1) << "WASAPIUnifiedStream::WASAPIUnifiedStream()";
- DCHECK(manager_);
-
- VLOG(1) << "Input channels : " << input_channels_;
- VLOG(1) << "Output channels: " << output_channels_;
- VLOG(1) << "Sample rate : " << params_.sample_rate();
- VLOG(1) << "Buffer size : " << params.frames_per_buffer();
-
-#ifndef NDEBUG
- input_time_stamps_.reset(new int64[kMaxFileSamples]);
- num_frames_in_fifo_.reset(new int[kMaxFileSamples]);
- resampler_margin_.reset(new int[kMaxFileSamples]);
- fifo_rate_comps_.reset(new double[kMaxFileSamples]);
- num_elements_.reset(new int[kMaxNumSampleTypes]);
- std::fill(num_elements_.get(), num_elements_.get() + kMaxNumSampleTypes, 0);
- input_params_.reset(new int[kMaxNumParams]);
- output_params_.reset(new int[kMaxNumParams]);
-#endif
-
- DVLOG_IF(1, share_mode_ == AUDCLNT_SHAREMODE_EXCLUSIVE)
- << "Core Audio (WASAPI) EXCLUSIVE MODE is enabled.";
-
- // Load the Avrt DLL if not already loaded. Required to support MMCSS.
- bool avrt_init = avrt::Initialize();
- DCHECK(avrt_init) << "Failed to load the avrt.dll";
-
- // All events are auto-reset events and non-signaled initially.
-
- // Create the event which the audio engine will signal each time a buffer
- // has been recorded.
- capture_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
-
- // Create the event which will be set in Stop() when streaming shall stop.
- stop_streaming_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
-}
-
-WASAPIUnifiedStream::~WASAPIUnifiedStream() {
- VLOG(1) << "WASAPIUnifiedStream::~WASAPIUnifiedStream()";
-#ifndef NDEBUG
- base::FilePath data_file_name;
- PathService::Get(base::DIR_EXE, &data_file_name);
- data_file_name = data_file_name.AppendASCII(kUnifiedAudioDebugFileName);
- data_file_ = base::OpenFile(data_file_name, "wt");
- DVLOG(1) << ">> Output file " << data_file_name.value() << " is created.";
-
- size_t n = 0;
- size_t elements_to_write = *std::min_element(
- num_elements_.get(), num_elements_.get() + kMaxNumSampleTypes);
- while (n < elements_to_write) {
- fprintf(data_file_, "%I64d %d %d %10.9f\n",
- input_time_stamps_[n],
- num_frames_in_fifo_[n],
- resampler_margin_[n],
- fifo_rate_comps_[n]);
- ++n;
- }
- base::CloseFile(data_file_);
-
- base::FilePath param_file_name;
- PathService::Get(base::DIR_EXE, &param_file_name);
- param_file_name = param_file_name.AppendASCII(kUnifiedAudioParamsFileName);
- param_file_ = base::OpenFile(param_file_name, "wt");
- DVLOG(1) << ">> Output file " << param_file_name.value() << " is created.";
- fprintf(param_file_, "%d %d\n", input_params_[0], input_params_[1]);
- fprintf(param_file_, "%d %d\n", output_params_[0], output_params_[1]);
- base::CloseFile(param_file_);
-#endif
-}
-
-bool WASAPIUnifiedStream::Open() {
- TRACE_EVENT0("audio", "WASAPIUnifiedStream::Open");
- DVLOG(1) << "WASAPIUnifiedStream::Open()";
- DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
- if (opened_)
- return true;
-
- AudioParameters hw_output_params;
- HRESULT hr = CoreAudioUtil::GetPreferredAudioParameters(
- eRender, eConsole, &hw_output_params);
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to get preferred output audio parameters.";
- return false;
- }
-
- AudioParameters hw_input_params;
- if (input_device_id_ == AudioManagerBase::kDefaultDeviceId) {
- // Query native parameters for the default capture device.
- hr = CoreAudioUtil::GetPreferredAudioParameters(
- eCapture, eConsole, &hw_input_params);
- } else {
- // Query native parameters for the capture device given by
- // |input_device_id_|.
- hr = CoreAudioUtil::GetPreferredAudioParameters(
- input_device_id_, &hw_input_params);
- }
- if (FAILED(hr)) {
- LOG(ERROR) << "Failed to get preferred input audio parameters.";
- return false;
- }
-
- // It is currently only possible to open up the output audio device using
- // the native number of channels.
- if (output_channels_ != hw_output_params.channels()) {
- LOG(ERROR) << "Audio device does not support requested output channels.";
- return false;
- }
-
- // It is currently only possible to open up the input audio device using
- // the native number of channels. If the client asks for a higher channel
- // count, we will do channel upmixing in this class. The most typical
- // example is that the client provides stereo but the hardware can only be
- // opened in mono mode. We will do mono to stereo conversion in this case.
- if (input_channels_ < hw_input_params.channels()) {
- LOG(ERROR) << "Audio device does not support requested input channels.";
- return false;
- } else if (input_channels_ > hw_input_params.channels()) {
- ChannelLayout input_layout =
- GuessChannelLayout(hw_input_params.channels());
- ChannelLayout output_layout = GuessChannelLayout(input_channels_);
- channel_mixer_.reset(new ChannelMixer(input_layout, output_layout));
- DVLOG(1) << "Remixing input channel layout from " << input_layout
- << " to " << output_layout << "; from "
- << hw_input_params.channels() << " channels to "
- << input_channels_;
- }
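The up-mixing case described above (fewer hardware channels than the client requested) is delegated to media::ChannelMixer; in the most common mono-to-stereo case it roughly amounts to duplicating the single captured channel, as in this illustrative sketch:

#include <vector>

// Duplicate a captured mono channel into both stereo channels.
void UpmixMonoToStereo(const std::vector<float>& mono,
                       std::vector<float>* left,
                       std::vector<float>* right) {
  left->assign(mono.begin(), mono.end());
  right->assign(mono.begin(), mono.end());
}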
-
- if (hw_output_params.sample_rate() != params_.sample_rate()) {
- LOG(ERROR) << "Requested sample-rate: " << params_.sample_rate()
- << " must match the hardware sample-rate: "
- << hw_output_params.sample_rate();
- return false;
- }
-
- if (hw_output_params.frames_per_buffer() != params_.frames_per_buffer()) {
- LOG(ERROR) << "Requested buffer size: " << params_.frames_per_buffer()
- << " must match the hardware buffer size: "
- << hw_output_params.frames_per_buffer();
- return false;
- }
-
- // Set up WAVEFORMATPCMEX structures for input and output given the specified
- // audio parameters.
- SetIOFormats(hw_input_params, params_);
-
- // Create the input and output busses.
- input_bus_ = AudioBus::Create(
- hw_input_params.channels(), input_buffer_size_frames_);
- output_bus_ = AudioBus::Create(params_);
-
- // One extra bus is needed for the input channel mixing case.
- if (channel_mixer_) {
- DCHECK_LT(hw_input_params.channels(), input_channels_);
- // The size of the |channel_bus_| must be the same as the size of the
- // output bus to ensure that the channel manager can deal with both
- // resampled and non-resampled data as input.
- channel_bus_ = AudioBus::Create(
- input_channels_, params_.frames_per_buffer());
- }
-
- // Check if FIFO and resampling is required to match the input rate to the
- // output rate. If so, a special thread loop, optimized for this case, will
- // be used. This mode is also called varispeed mode.
- // Note that we can also use this mode when input and output rates are the
- // same but native buffer sizes differ (can happen if two different audio
- // devices are used). For this case, the resampler uses a target ratio of
- // 1.0 but SetRatio is called to compensate for clock-drift. The FIFO is
- // required to compensate for the difference in buffer sizes.
- // TODO(henrika): we could perhaps improve the performance for the second
- // case here by only using the FIFO and avoiding resampling. It is unclear
- // how much that would gain, and we would risk not compensating for clock drift.
- if (hw_input_params.sample_rate() != params_.sample_rate() ||
- hw_input_params.frames_per_buffer() != params_.frames_per_buffer()) {
- DoVarispeedInitialization(hw_input_params, params_);
- }
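The varispeed path described above resamples between the capture and render rates and uses a FIFO plus a small rate compensation that pulls the FIFO back toward a target fill level, which is how clock drift between the two devices is absorbed. A simplified sketch of such a compensation factor follows; it is illustrative only, since the removed class derived it from a smoothed delta rather than directly from the FIFO level.

#include <algorithm>

// Nudge the resampling ratio so that |fifo_frames| drifts back to
// |target_fifo_frames| over roughly |correction_time_seconds|. In this
// sketch's convention, values > 1.0 consume input slightly faster (FIFO too
// full) and values < 1.0 slightly slower (FIFO running dry).
double FifoRateCompensation(int fifo_frames, int target_fifo_frames,
                            int sample_rate, double correction_time_seconds) {
  const double correction_frames = sample_rate * correction_time_seconds;
  const double compensation =
      1.0 + (fifo_frames - target_fifo_frames) / correction_frames;
  // Clamp so the effective pitch change stays inaudible.
  return std::max(0.99, std::min(1.01, compensation));
}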
-
- // Render side (event driven only in varispeed mode):
-
- ScopedComPtr<IAudioClient> audio_output_client =
- CoreAudioUtil::CreateDefaultClient(eRender, eConsole);
- if (!audio_output_client)
- return false;
-
- if (!CoreAudioUtil::IsFormatSupported(audio_output_client,
- share_mode_,
- &output_format_)) {
- return false;
- }
-
- if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
- // The |render_event_| will be NULL unless varispeed mode is utilized.
- hr = CoreAudioUtil::SharedModeInitialize(
- audio_output_client, &output_format_, render_event_.Get(),
- &endpoint_render_buffer_size_frames_);
- } else {
- // TODO(henrika): add support for AUDCLNT_SHAREMODE_EXCLUSIVE.
- }
- if (FAILED(hr))
- return false;
-
- ScopedComPtr<IAudioRenderClient> audio_render_client =
- CoreAudioUtil::CreateRenderClient(audio_output_client);
- if (!audio_render_client)
- return false;
-
- // Capture side (always event driven but format depends on varispeed or not):
-
- ScopedComPtr<IAudioClient> audio_input_client;
- if (input_device_id_ == AudioManagerBase::kDefaultDeviceId) {
- audio_input_client = CoreAudioUtil::CreateDefaultClient(eCapture, eConsole);
- } else {
- ScopedComPtr<IMMDevice> audio_input_device(
- CoreAudioUtil::CreateDevice(input_device_id_));
- audio_input_client = CoreAudioUtil::CreateClient(audio_input_device);
- }
- if (!audio_input_client)
- return false;
-
- if (!CoreAudioUtil::IsFormatSupported(audio_input_client,
- share_mode_,
- &input_format_)) {
- return false;
- }
-
- if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
- // Include valid event handle for event-driven initialization.
- // The input side is always event driven independent of if varispeed is
- // used or not.
- hr = CoreAudioUtil::SharedModeInitialize(
- audio_input_client, &input_format_, capture_event_.Get(),
- &endpoint_capture_buffer_size_frames_);
- } else {
- // TODO(henrika): add support for AUDCLNT_SHAREMODE_EXCLUSIVE.
- }
- if (FAILED(hr))
- return false;
-
- ScopedComPtr<IAudioCaptureClient> audio_capture_client =
- CoreAudioUtil::CreateCaptureClient(audio_input_client);
- if (!audio_capture_client)
- return false;
-
- // Varispeed mode requires additional preparations.
- if (VarispeedMode())
- ResetVarispeed();
-
- // Store all valid COM interfaces.
- audio_output_client_ = audio_output_client;
- audio_render_client_ = audio_render_client;
- audio_input_client_ = audio_input_client;
- audio_capture_client_ = audio_capture_client;
-
- opened_ = true;
- return SUCCEEDED(hr);
-}
-
-void WASAPIUnifiedStream::Start(AudioSourceCallback* callback) {
- TRACE_EVENT0("audio", "WASAPIUnifiedStream::Start");
- DVLOG(1) << "WASAPIUnifiedStream::Start()";
- DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
- CHECK(callback);
- CHECK(opened_);
-
- if (audio_io_thread_) {
- CHECK_EQ(callback, source_);
- return;
- }
-
- source_ = callback;
-
- if (VarispeedMode()) {
- ResetVarispeed();
- fifo_rate_compensation_ = 1.0;
- average_delta_ = 0.0;
- input_callback_received_ = false;
- update_output_delay_ = false;
- }
-
- // Create and start the thread that will listen for capture events.
- // We will also listen on render events on the same thread if varispeed
- // mode is utilized.
- audio_io_thread_.reset(
- new base::DelegateSimpleThread(this, "wasapi_io_thread"));
- audio_io_thread_->Start();
- if (!audio_io_thread_->HasBeenStarted()) {
- DLOG(ERROR) << "Failed to start WASAPI IO thread.";
- return;
- }
-
- // Start input streaming data between the endpoint buffer and the audio
- // engine.
- HRESULT hr = audio_input_client_->Start();
- if (FAILED(hr)) {
- StopAndJoinThread(hr);
- return;
- }
-
- // Ensure that the endpoint buffer is prepared with silence.
- if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
- if (!CoreAudioUtil::FillRenderEndpointBufferWithSilence(
- audio_output_client_, audio_render_client_)) {
- DLOG(WARNING) << "Failed to prepare endpoint buffers with silence.";
- return;
- }
- }
- num_written_frames_ = endpoint_render_buffer_size_frames_;
-
- // Start output streaming data between the endpoint buffer and the audio
- // engine.
- hr = audio_output_client_->Start();
- if (FAILED(hr)) {
- StopAndJoinThread(hr);
- return;
- }
-}
-
-void WASAPIUnifiedStream::Stop() {
- TRACE_EVENT0("audio", "WASAPIUnifiedStream::Stop");
- DVLOG(1) << "WASAPIUnifiedStream::Stop()";
- DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
- if (!audio_io_thread_)
- return;
-
- // Stop input audio streaming.
- HRESULT hr = audio_input_client_->Stop();
- if (FAILED(hr)) {
- DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED)
- << "Failed to stop input streaming: " << std::hex << hr;
- }
-
- // Stop output audio streaming.
- hr = audio_output_client_->Stop();
- if (FAILED(hr)) {
- DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED)
- << "Failed to stop output streaming: " << std::hex << hr;
- }
-
- // Wait until the thread completes and perform cleanup.
- SetEvent(stop_streaming_event_.Get());
- audio_io_thread_->Join();
- audio_io_thread_.reset();
-
- // Ensure that we don't quit the main thread loop immediately next
- // time Start() is called.
- ResetEvent(stop_streaming_event_.Get());
-
- // Clear source callback, it'll be set again on the next Start() call.
- source_ = NULL;
-
- // Flush all pending data and reset the audio clock stream position to 0.
- hr = audio_output_client_->Reset();
- if (FAILED(hr)) {
- DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED)
- << "Failed to reset output streaming: " << std::hex << hr;
- }
-
- audio_input_client_->Reset();
- if (FAILED(hr)) {
- DLOG_IF(ERROR, hr != AUDCLNT_E_NOT_INITIALIZED)
- << "Failed to reset input streaming: " << std::hex << hr;
- }
-
- // Extra safety check to ensure that the buffers are cleared.
- // If the buffers are not cleared correctly, the next call to Start()
- // would fail with AUDCLNT_E_BUFFER_ERROR at IAudioRenderClient::GetBuffer().
- // TODO(henrika): this check is only needed for shared-mode streams.
- UINT32 num_queued_frames = 0;
- audio_output_client_->GetCurrentPadding(&num_queued_frames);
- DCHECK_EQ(0u, num_queued_frames);
-}
-
-void WASAPIUnifiedStream::Close() {
- TRACE_EVENT0("audio", "WASAPIUnifiedStream::Close");
- DVLOG(1) << "WASAPIUnifiedStream::Close()";
- DCHECK_EQ(GetCurrentThreadId(), creating_thread_id_);
-
- // It is valid to call Close() before calling open or Start().
- // It is also valid to call Close() after Start() has been called.
- Stop();
-
- // Inform the audio manager that we have been closed. This will cause our
- // destruction.
- manager_->ReleaseOutputStream(this);
-}
-
-void WASAPIUnifiedStream::SetVolume(double volume) {
- DVLOG(1) << "SetVolume(volume=" << volume << ")";
- if (volume < 0 || volume > 1)
- return;
- volume_ = volume;
-}
-
-void WASAPIUnifiedStream::GetVolume(double* volume) {
- DVLOG(1) << "GetVolume()";
- *volume = static_cast<double>(volume_);
-}
-
-
-void WASAPIUnifiedStream::ProvideInput(int frame_delay, AudioBus* audio_bus) {
- // TODO(henrika): utilize frame_delay?
-  // A non-zero frame delay means multiple callbacks were necessary to
-  // fulfill the requested number of frames.
- if (frame_delay > 0)
- DVLOG(3) << "frame_delay: " << frame_delay;
-
-#ifndef NDEBUG
- resampler_margin_[num_elements_[RESAMPLER_MARGIN]] =
- fifo_->frames() - audio_bus->frames();
- num_elements_[RESAMPLER_MARGIN]++;
-#endif
-
- if (fifo_->frames() < audio_bus->frames()) {
-    DLOG(ERROR) << "Not enough data in the FIFO ("
- << fifo_->frames() << " < " << audio_bus->frames() << ")";
- audio_bus->Zero();
- return;
- }
-
- fifo_->Consume(audio_bus, 0, audio_bus->frames());
-}
-
-void WASAPIUnifiedStream::SetIOFormats(const AudioParameters& input_params,
- const AudioParameters& output_params) {
- for (int n = 0; n < 2; ++n) {
- const AudioParameters& params = (n == 0) ? input_params : output_params;
- WAVEFORMATPCMEX* xformat = (n == 0) ? &input_format_ : &output_format_;
- WAVEFORMATEX* format = &xformat->Format;
-
- // Begin with the WAVEFORMATEX structure that specifies the basic format.
- format->wFormatTag = WAVE_FORMAT_EXTENSIBLE;
- format->nChannels = params.channels();
- format->nSamplesPerSec = params.sample_rate();
- format->wBitsPerSample = params.bits_per_sample();
- format->nBlockAlign = (format->wBitsPerSample / 8) * format->nChannels;
- format->nAvgBytesPerSec = format->nSamplesPerSec * format->nBlockAlign;
- format->cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX);
-
- // Add the parts which are unique to WAVE_FORMAT_EXTENSIBLE.
- // Note that we always open up using the native channel layout.
- (*xformat).Samples.wValidBitsPerSample = format->wBitsPerSample;
- (*xformat).dwChannelMask =
- CoreAudioUtil::GetChannelConfig(
- std::string(), n == 0 ? eCapture : eRender);
- (*xformat).SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
- }
-
- input_buffer_size_frames_ = input_params.frames_per_buffer();
- output_buffer_size_frames_ = output_params.frames_per_buffer();
- VLOG(1) << "#audio frames per input buffer : " << input_buffer_size_frames_;
- VLOG(1) << "#audio frames per output buffer: " << output_buffer_size_frames_;
-
-#ifndef NDEBUG
- input_params_[0] = input_format_.Format.nSamplesPerSec;
- input_params_[1] = input_buffer_size_frames_;
- output_params_[0] = output_format_.Format.nSamplesPerSec;
- output_params_[1] = output_buffer_size_frames_;
-#endif
-}
-
-void WASAPIUnifiedStream::DoVarispeedInitialization(
- const AudioParameters& input_params, const AudioParameters& output_params) {
- DVLOG(1) << "WASAPIUnifiedStream::DoVarispeedInitialization()";
-
- // A FIFO is required in this mode for input to output buffering.
- // Note that it will add some latency.
- fifo_.reset(new AudioFifo(input_params.channels(), kFifoSize));
- VLOG(1) << "Using FIFO of size " << fifo_->max_frames()
- << " (#channels=" << input_params.channels() << ")";
-
- // Create the multi channel resampler using the initial sample rate ratio.
- // We will call MultiChannelResampler::SetRatio() during runtime to
- // allow arbitrary combinations of input and output devices running off
- // different clocks and using different drivers, with potentially
- // differing sample-rates. Note that the requested block size is given by
- // the native input buffer size |input_buffer_size_frames_|.
- io_sample_rate_ratio_ = input_params.sample_rate() /
- static_cast<double>(output_params.sample_rate());
- DVLOG(2) << "io_sample_rate_ratio: " << io_sample_rate_ratio_;
- resampler_.reset(new MultiChannelResampler(
- input_params.channels(), io_sample_rate_ratio_, input_buffer_size_frames_,
- base::Bind(&WASAPIUnifiedStream::ProvideInput, base::Unretained(this))));
- VLOG(1) << "Resampling from " << input_params.sample_rate() << " to "
- << output_params.sample_rate();
-
- // The optimal number of frames we'd like to keep in the FIFO at all times.
- // The actual size will vary but the goal is to ensure that the average size
- // is given by this value.
- target_fifo_frames_ = kTargetFifoSafetyFactor * input_buffer_size_frames_;
- VLOG(1) << "Target FIFO size: " << target_fifo_frames_;
-
- // Create the event which the audio engine will signal each time it
- // wants an audio buffer to render.
- render_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
-
- // Allocate memory for temporary audio bus used to store resampled input
- // audio.
- resampled_bus_ = AudioBus::Create(
- input_params.channels(), output_buffer_size_frames_);
-
- // Buffer initial silence corresponding to target I/O buffering.
- ResetVarispeed();
-}
-
-void WASAPIUnifiedStream::ResetVarispeed() {
- DCHECK(VarispeedMode());
-
- // Buffer initial silence corresponding to target I/O buffering.
- fifo_->Clear();
- scoped_ptr<AudioBus> silence =
- AudioBus::Create(input_format_.Format.nChannels,
- target_fifo_frames_);
- silence->Zero();
- fifo_->Push(silence.get());
- resampler_->Flush();
-}
-
-void WASAPIUnifiedStream::Run() {
- ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA);
-
- // Increase the thread priority.
- audio_io_thread_->SetThreadPriority(base::kThreadPriority_RealtimeAudio);
-
- // Enable MMCSS to ensure that this thread receives prioritized access to
- // CPU resources.
- // TODO(henrika): investigate if it is possible to include these additional
- // settings in SetThreadPriority() as well.
- DWORD task_index = 0;
- HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio",
- &task_index);
- bool mmcss_is_ok =
- (mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL));
- if (!mmcss_is_ok) {
- // Failed to enable MMCSS on this thread. It is not fatal but can lead
- // to reduced QoS at high load.
- DWORD err = GetLastError();
- LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ").";
- }
-
- // The IAudioClock interface enables us to monitor a stream's data
- // rate and the current position in the stream. Allocate it before we
- // start spinning.
- ScopedComPtr<IAudioClock> audio_output_clock;
- HRESULT hr = audio_output_client_->GetService(
- __uuidof(IAudioClock), audio_output_clock.ReceiveVoid());
- LOG_IF(WARNING, FAILED(hr)) << "Failed to create IAudioClock: "
- << std::hex << hr;
-
- bool streaming = true;
- bool error = false;
-
- HANDLE wait_array[3];
- size_t num_handles = 0;
- wait_array[num_handles++] = stop_streaming_event_;
- wait_array[num_handles++] = capture_event_;
- if (render_event_) {
- // One extra event handle is needed in varispeed mode.
- wait_array[num_handles++] = render_event_;
- }
-
- // Keep streaming audio until stop event is signaled.
- // Capture events are always used but render events are only active in
- // varispeed mode.
- while (streaming && !error) {
- // Wait for a close-down event, or a new capture event.
- DWORD wait_result = WaitForMultipleObjects(num_handles,
- wait_array,
- FALSE,
- INFINITE);
- switch (wait_result) {
- case WAIT_OBJECT_0 + 0:
- // |stop_streaming_event_| has been set.
- streaming = false;
- break;
- case WAIT_OBJECT_0 + 1:
-        // |capture_event_| has been set.
- if (VarispeedMode()) {
- ProcessInputAudio();
- } else {
- ProcessInputAudio();
- ProcessOutputAudio(audio_output_clock);
- }
- break;
- case WAIT_OBJECT_0 + 2:
- DCHECK(VarispeedMode());
-        // |render_event_| has been set.
- ProcessOutputAudio(audio_output_clock);
- break;
- default:
- error = true;
- break;
- }
- }
-
- if (streaming && error) {
- // Stop audio streaming since something has gone wrong in our main thread
-    // loop. Note that we are still in a "started" state, hence a Stop() call
- // is required to join the thread properly.
- audio_input_client_->Stop();
- audio_output_client_->Stop();
- PLOG(ERROR) << "WASAPI streaming failed.";
- }
-
- // Disable MMCSS.
- if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) {
- PLOG(WARNING) << "Failed to disable MMCSS";
- }
-}
-
-void WASAPIUnifiedStream::ProcessInputAudio() {
- TRACE_EVENT0("audio", "WASAPIUnifiedStream::ProcessInputAudio");
-
- BYTE* data_ptr = NULL;
- UINT32 num_captured_frames = 0;
- DWORD flags = 0;
- UINT64 device_position = 0;
- UINT64 capture_time_stamp = 0;
-
- const int bytes_per_sample = input_format_.Format.wBitsPerSample >> 3;
-
- base::TimeTicks now_tick = base::TimeTicks::HighResNow();
-
-#ifndef NDEBUG
- if (VarispeedMode()) {
- input_time_stamps_[num_elements_[INPUT_TIME_STAMP]] =
- now_tick.ToInternalValue();
- num_elements_[INPUT_TIME_STAMP]++;
- }
-#endif
-
- // Retrieve the amount of data in the capture endpoint buffer.
- // |endpoint_capture_time_stamp| is the value of the performance
- // counter at the time that the audio endpoint device recorded
- // the device position of the first audio frame in the data packet.
- HRESULT hr = audio_capture_client_->GetBuffer(&data_ptr,
- &num_captured_frames,
- &flags,
- &device_position,
- &capture_time_stamp);
- if (FAILED(hr)) {
- DLOG(ERROR) << "Failed to get data from the capture buffer";
- return;
- }
-
- if (hr == AUDCLNT_S_BUFFER_EMPTY) {
-    // The return code is a success code but a new packet is *not* available
- // and none of the output parameters in the GetBuffer() call contains valid
- // values. Best we can do is to deliver silence and avoid setting
- // |input_callback_received_| since this only seems to happen for the
- // initial event(s) on some devices.
- input_bus_->Zero();
- } else {
- // Valid data has been recorded and it is now OK to set the flag which
- // informs the render side that capturing has started.
- input_callback_received_ = true;
- }
-
- if (num_captured_frames != 0) {
- if (flags & AUDCLNT_BUFFERFLAGS_SILENT) {
- // Clear out the capture buffer since silence is reported.
- input_bus_->Zero();
- } else {
- // Store captured data in an audio bus after de-interleaving
- // the data to match the audio bus structure.
- input_bus_->FromInterleaved(
- data_ptr, num_captured_frames, bytes_per_sample);
- }
- }
-
- hr = audio_capture_client_->ReleaseBuffer(num_captured_frames);
- DLOG_IF(ERROR, FAILED(hr)) << "Failed to release capture buffer";
-
- // Buffer input into FIFO if varispeed mode is used. The render event
- // will drive resampling of this data to match the output side.
- if (VarispeedMode()) {
- int available_frames = fifo_->max_frames() - fifo_->frames();
- if (input_bus_->frames() <= available_frames) {
- fifo_->Push(input_bus_.get());
- }
-#ifndef NDEBUG
- num_frames_in_fifo_[num_elements_[NUM_FRAMES_IN_FIFO]] =
- fifo_->frames();
- num_elements_[NUM_FRAMES_IN_FIFO]++;
-#endif
- }
-
-  // Save resources by not asking for new delay estimates each time.
- // These estimates are fairly stable and it is perfectly safe to only
- // sample at a rate of ~1Hz.
- // TODO(henrika): we might have to increase the update rate in varispeed
- // mode since the delay variations are higher in this mode.
- if ((now_tick - last_delay_sample_time_).InMilliseconds() >
- kTimeDiffInMillisecondsBetweenDelayMeasurements &&
- input_callback_received_) {
- // Calculate the estimated capture delay, i.e., the latency between
- // the recording time and the time we when we are notified about
-    // the recording time and the time when we are notified about
- // 100-nanosecond (0.1 microseconds) units.
- base::TimeDelta diff =
- now_tick - base::TimeTicks::FromInternalValue(0.1 * capture_time_stamp);
- capture_delay_ms_ = diff.InMillisecondsF();
-
- last_delay_sample_time_ = now_tick;
- update_output_delay_ = true;
- }
-}
-
-void WASAPIUnifiedStream::ProcessOutputAudio(IAudioClock* audio_output_clock) {
- TRACE_EVENT0("audio", "WASAPIUnifiedStream::ProcessOutputAudio");
-
- if (!input_callback_received_) {
- if (share_mode_ == AUDCLNT_SHAREMODE_SHARED) {
- if (!CoreAudioUtil::FillRenderEndpointBufferWithSilence(
- audio_output_client_, audio_render_client_))
- DLOG(WARNING) << "Failed to prepare endpoint buffers with silence.";
- }
- return;
- }
-
- // Rate adjusted resampling is required in varispeed mode. It means that
- // recorded audio samples will be read from the FIFO, resampled to match the
- // output sample-rate and then stored in |resampled_bus_|.
- if (VarispeedMode()) {
- // Calculate a varispeed rate scalar factor to compensate for drift between
- // input and output. We use the actual number of frames still in the FIFO
- // compared with the ideal value of |target_fifo_frames_|.
- int delta = fifo_->frames() - target_fifo_frames_;
-
- // Average |delta| because it can jitter back/forth quite frequently
- // by +/- the hardware buffer-size *if* the input and output callbacks are
- // happening at almost exactly the same time. Also, if the input and output
- // sample-rates are different then |delta| will jitter quite a bit due to
- // the rate conversion happening in the varispeed, plus the jittering of
- // the callbacks. The average value is what's important here.
- // We use an exponential smoothing filter to reduce the variations.
- average_delta_ += kAlpha * (delta - average_delta_);
-
- // Compute a rate compensation which always attracts us back to the
- // |target_fifo_frames_| over a period of kCorrectionTimeSeconds.
- double correction_time_frames =
- kCorrectionTimeSeconds * output_format_.Format.nSamplesPerSec;
- fifo_rate_compensation_ =
- (correction_time_frames + average_delta_) / correction_time_frames;
-
-#ifndef NDEBUG
- fifo_rate_comps_[num_elements_[RATE_COMPENSATION]] =
- fifo_rate_compensation_;
- num_elements_[RATE_COMPENSATION]++;
-#endif
-
- // Adjust for FIFO drift.
- const double new_ratio = io_sample_rate_ratio_ * fifo_rate_compensation_;
- resampler_->SetRatio(new_ratio);
- // Get resampled input audio from FIFO where the size is given by the
- // output side.
- resampler_->Resample(resampled_bus_->frames(), resampled_bus_.get());
- }
-
- // Derive a new total delay estimate if the capture side has set the
- // |update_output_delay_| flag.
- if (update_output_delay_) {
- // Calculate the estimated render delay, i.e., the time difference
- // between the time when data is added to the endpoint buffer and
- // when the data is played out on the actual speaker.
- const double stream_pos = CurrentStreamPosInMilliseconds(
- num_written_frames_ + output_buffer_size_frames_,
- output_format_.Format.nSamplesPerSec);
- const double speaker_pos =
- SpeakerStreamPosInMilliseconds(audio_output_clock);
- const double render_delay_ms = stream_pos - speaker_pos;
- const double fifo_delay_ms = VarispeedMode() ?
- FrameCountToMilliseconds(target_fifo_frames_, input_format_) : 0;
-
- // Derive the total delay, i.e., the sum of the input and output
- // delays. Also convert the value into byte units. An extra FIFO delay
- // is added for varispeed usage cases.
- total_delay_ms_ = VarispeedMode() ?
- capture_delay_ms_ + render_delay_ms + fifo_delay_ms :
- capture_delay_ms_ + render_delay_ms;
- DVLOG(2) << "total_delay_ms : " << total_delay_ms_;
- DVLOG(3) << " capture_delay_ms: " << capture_delay_ms_;
- DVLOG(3) << " render_delay_ms : " << render_delay_ms;
- DVLOG(3) << " fifo_delay_ms : " << fifo_delay_ms;
- total_delay_bytes_ = MillisecondsToBytes(total_delay_ms_, output_format_);
-
- // Wait for new signal from the capture side.
- update_output_delay_ = false;
- }
-
-  // Select the source depending on whether varispeed is utilized or not.
- // Also, the source might be the output of a channel mixer if channel mixing
- // is required to match the native input channels to the number of input
- // channels used by the client (given by |input_channels_| in this case).
- AudioBus* input_bus = VarispeedMode() ?
- resampled_bus_.get() : input_bus_.get();
- if (channel_mixer_) {
- DCHECK_EQ(input_bus->frames(), channel_bus_->frames());
- // Most common case is 1->2 channel upmixing.
- channel_mixer_->Transform(input_bus, channel_bus_.get());
- // Use the output from the channel mixer as new input bus.
- input_bus = channel_bus_.get();
- }
-
- // Prepare for rendering by calling OnMoreIOData().
- int frames_filled = source_->OnMoreIOData(
- input_bus,
- output_bus_.get(),
- AudioBuffersState(0, total_delay_bytes_));
- DCHECK_EQ(frames_filled, output_bus_->frames());
-
- // Keep track of number of rendered frames since we need it for
- // our delay calculations.
- num_written_frames_ += frames_filled;
-
-  // Derive the amount of available space in the endpoint buffer.
- // Avoid render attempt if there is no room for a captured packet.
- UINT32 num_queued_frames = 0;
- audio_output_client_->GetCurrentPadding(&num_queued_frames);
- if (endpoint_render_buffer_size_frames_ - num_queued_frames <
- output_buffer_size_frames_)
- return;
-
- // Grab all available space in the rendering endpoint buffer
- // into which the client can write a data packet.
- uint8* audio_data = NULL;
- HRESULT hr = audio_render_client_->GetBuffer(output_buffer_size_frames_,
- &audio_data);
- if (FAILED(hr)) {
- DLOG(ERROR) << "Failed to access render buffer";
- return;
- }
-
- const int bytes_per_sample = output_format_.Format.wBitsPerSample >> 3;
-
- // Convert the audio bus content to interleaved integer data using
- // |audio_data| as destination.
- output_bus_->Scale(volume_);
- output_bus_->ToInterleaved(
- output_buffer_size_frames_, bytes_per_sample, audio_data);
-
- // Release the buffer space acquired in the GetBuffer() call.
-  hr = audio_render_client_->ReleaseBuffer(output_buffer_size_frames_, 0);
- DLOG_IF(ERROR, FAILED(hr)) << "Failed to release render buffer";
-
- return;
-}
-
-void WASAPIUnifiedStream::HandleError(HRESULT err) {
- CHECK((started() && GetCurrentThreadId() == audio_io_thread_->tid()) ||
- (!started() && GetCurrentThreadId() == creating_thread_id_));
- NOTREACHED() << "Error code: " << std::hex << err;
- if (source_)
- source_->OnError(this);
-}
-
-void WASAPIUnifiedStream::StopAndJoinThread(HRESULT err) {
- CHECK(GetCurrentThreadId() == creating_thread_id_);
- DCHECK(audio_io_thread_.get());
- SetEvent(stop_streaming_event_.Get());
- audio_io_thread_->Join();
- audio_io_thread_.reset();
- HandleError(err);
-}
-
-} // namespace media
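For readers following the math in ProcessOutputAudio() above: the varispeed correction is just an exponentially smoothed FIFO-level error divided by a correction window expressed in frames. Below is a minimal standalone sketch, not part of the patch; the kAlpha and kCorrectionTimeSeconds values are assumptions (the real constants are defined earlier in audio_unified_win.cc).

#include <cstdio>

int main() {
  const double kAlphaGuess = 0.1;                  // smoothing factor (assumed)
  const double kCorrectionTimeSecondsGuess = 0.1;  // pull-back period (assumed)
  const int sample_rate = 48000;
  const int target_fifo_frames = 2 * 480;  // kTargetFifoSafetyFactor * buffer

  // Pretend the FIFO currently sits 240 frames above the target and watch the
  // smoothed delta push the rate compensation above 1.0 (drain the FIFO).
  double average_delta = 0.0;
  const int fifo_frames = target_fifo_frames + 240;
  for (int i = 0; i < 5; ++i) {
    const int delta = fifo_frames - target_fifo_frames;
    average_delta += kAlphaGuess * (delta - average_delta);
    const double correction_time_frames =
        kCorrectionTimeSecondsGuess * sample_rate;
    const double rate_compensation =
        (correction_time_frames + average_delta) / correction_time_frames;
    std::printf("step %d: rate compensation = %.5f\n", i, rate_compensation);
  }
  return 0;
}

A compensation factor above 1.0 makes the resampler consume input slightly faster, pulling the FIFO back towards |target_fifo_frames_| over the correction period.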
diff --git a/chromium/media/audio/win/audio_unified_win.h b/chromium/media/audio/win/audio_unified_win.h
deleted file mode 100644
index 76c53297b51..00000000000
--- a/chromium/media/audio/win/audio_unified_win.h
+++ /dev/null
@@ -1,352 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_AUDIO_WIN_AUDIO_UNIFIED_WIN_H_
-#define MEDIA_AUDIO_WIN_AUDIO_UNIFIED_WIN_H_
-
-#include <Audioclient.h>
-#include <MMDeviceAPI.h>
-
-#include <string>
-
-#include "base/compiler_specific.h"
-#include "base/gtest_prod_util.h"
-#include "base/threading/platform_thread.h"
-#include "base/threading/simple_thread.h"
-#include "base/win/scoped_co_mem.h"
-#include "base/win/scoped_comptr.h"
-#include "base/win/scoped_handle.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_parameters.h"
-#include "media/base/audio_fifo.h"
-#include "media/base/channel_mixer.h"
-#include "media/base/media_export.h"
-#include "media/base/multi_channel_resampler.h"
-
-namespace media {
-
-class AudioManagerWin;
-
-// Implementation of AudioOutputStream for Windows using the Core Audio API
-// where both capturing and rendering take place on the same thread to enable
-// audio I/O. This class allows arbitrary combinations of input and output
-// devices running off different clocks and using different drivers, with
-// potentially differing sample-rates.
-//
-// It is required to first acquire the native sample rate of the selected
-// output device and then use the same rate when creating this object.
-// The inner operation depends on the input sample rate which is determined
-// during construction. Three different main modes are supported:
-//
-// 1) input rate == output rate => input side drives output side directly.
-// 2) input rate != output rate => both sides are driven independently by
-// events and a FIFO plus a resampling unit is used to compensate for
-// differences in sample rates between the two sides.
-// 3) input rate == output rate but native buffer sizes are not identical =>
-// same inner functionality as in (2) to compensate for the differences
-// in buffer sizes and also compensate for any potential clock drift
-// between the two devices.
-//
-// Mode detection is done at construction, and using mode (1) leads to the
-// best performance (lower delay and no "varispeed distortion"), i.e., it is
-// recommended to use the same sample rate for input and output. Mode (2) uses a
-// resampler which supports rate adjustments to fine tune for things like
-// clock drift and differences in sample rates between different devices.
-// Mode (2) - which uses a FIFO and an adjustable multi-channel resampler -
-// is also called the varispeed mode and it is used for case (3) as well,
-// mainly to compensate for the difference in buffer sizes.
-// Mode (3) can happen if two different audio devices are used.
-// As an example: some devices need a buffer size of 441 @ 44.1kHz and others
-// 448 @ 44.1kHz. This is a rare case and will only happen for sample rates
-// which are even multiples of 11025 Hz (11025, 22050, 44100, 88200 etc.).
-//
-// Implementation notes:
-//
-// - Open() can fail if the input and output parameters do not fulfill
-// certain conditions. See source for Open() for more details.
-// - Channel mixing will be performed if the client asks for a larger
-// number of channels than the native audio layer provides.
-// Example: client wants stereo but audio layer provides mono. In this case
-// upmixing from mono to stereo (1->2) will be done.
-//
-// TODO(henrika):
-//
-// - Add support for exclusive mode.
-// - Add support for KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, i.e., 32-bit float
-// as internal sample-value representation.
-// - Perform fine-tuning for non-matching sample rates to reduce latency.
-//
-class MEDIA_EXPORT WASAPIUnifiedStream
- : public AudioOutputStream,
- public base::DelegateSimpleThread::Delegate {
- public:
-  // The ctor takes all the usual parameters, plus |manager| which is the
-  // audio manager that is creating this object.
- WASAPIUnifiedStream(AudioManagerWin* manager,
- const AudioParameters& params,
- const std::string& input_device_id);
-
- // The dtor is typically called by the AudioManager only and it is usually
- // triggered by calling AudioOutputStream::Close().
- virtual ~WASAPIUnifiedStream();
-
- // Implementation of AudioOutputStream.
- virtual bool Open() OVERRIDE;
- virtual void Start(AudioSourceCallback* callback) OVERRIDE;
- virtual void Stop() OVERRIDE;
- virtual void Close() OVERRIDE;
- virtual void SetVolume(double volume) OVERRIDE;
- virtual void GetVolume(double* volume) OVERRIDE;
-
- bool started() const {
- return audio_io_thread_.get() != NULL;
- }
-
-  // Returns true if the input sample rate differs from the output sample rate.
-  // A FIFO and an adjustable multi-channel resampler are utilized in this mode.
- bool VarispeedMode() const { return (fifo_ && resampler_); }
-
- private:
- enum {
- // Time in milliseconds between two successive delay measurements.
- // We save resources by not updating the delay estimates for each capture
- // event (typically 100Hz rate).
- kTimeDiffInMillisecondsBetweenDelayMeasurements = 1000,
-
- // Max possible FIFO size.
- kFifoSize = 16384,
-
- // This value was determined empirically for minimum latency while still
- // guarding against FIFO under-runs. The actual target size will be equal
- // to kTargetFifoSafetyFactor * (native input buffer size).
- // TODO(henrika): tune this value for lowest possible latency for all
- // possible sample rate combinations.
- kTargetFifoSafetyFactor = 2
- };
-
-  // Additional initialization required when the input and output sample rates
-  // differ. Allocates resources for |fifo_|, |resampler_|, |render_event_| and
-  // |resampled_bus_| given the provided input and output audio parameters.
- void DoVarispeedInitialization(const AudioParameters& input_params,
- const AudioParameters& output_params);
-
-  // Clears varispeed-related components such as the FIFO and the resampler.
- void ResetVarispeed();
-
- // Builds WAVEFORMATEX structures for input and output based on input and
- // output audio parameters.
- void SetIOFormats(const AudioParameters& input_params,
- const AudioParameters& output_params);
-
- // DelegateSimpleThread::Delegate implementation.
- virtual void Run() OVERRIDE;
-
- // MultiChannelResampler::MultiChannelAudioSourceProvider implementation.
- // Callback for providing more data into the resampler.
- // Only used in varispeed mode, i.e., when input rate != output rate.
- virtual void ProvideInput(int frame_delay, AudioBus* audio_bus);
-
- // Issues the OnError() callback to the |source_|.
- void HandleError(HRESULT err);
-
- // Stops and joins the audio thread in case of an error.
- void StopAndJoinThread(HRESULT err);
-
- // Converts unique endpoint ID to user-friendly device name.
- std::string GetDeviceName(LPCWSTR device_id) const;
-
- // Called on the audio IO thread for each capture event.
- // Buffers captured audio into a FIFO if varispeed is used or into an audio
- // bus if input and output sample rates are identical.
- void ProcessInputAudio();
-
- // Called on the audio IO thread for each render event when varispeed is
- // active or for each capture event when varispeed is not used.
- // In varispeed mode, it triggers a resampling callback, which reads from the
- // FIFO, and calls AudioSourceCallback::OnMoreIOData using the resampled
- // input signal and at the same time asks for data to play out.
- // If input and output rates are the same - instead of reading from the FIFO
-  // and doing resampling - we read directly from the audio bus used to store
- // captured data in ProcessInputAudio.
- void ProcessOutputAudio(IAudioClock* audio_output_clock);
-
- // Contains the thread ID of the creating thread.
- base::PlatformThreadId creating_thread_id_;
-
-  // Our creator, the audio manager, needs to be notified when we close.
- AudioManagerWin* manager_;
-
- // Contains the audio parameter structure provided at construction.
- AudioParameters params_;
- // For convenience, same as in params_.
- int input_channels_;
- int output_channels_;
-
- // Unique ID of the input device to be opened.
- const std::string input_device_id_;
-
- // The sharing mode for the streams.
- // Valid values are AUDCLNT_SHAREMODE_SHARED and AUDCLNT_SHAREMODE_EXCLUSIVE
- // where AUDCLNT_SHAREMODE_SHARED is the default.
- AUDCLNT_SHAREMODE share_mode_;
-
- // Rendering and capturing is driven by this thread (no message loop).
- // All OnMoreIOData() callbacks will be called from this thread.
- scoped_ptr<base::DelegateSimpleThread> audio_io_thread_;
-
- // Contains the desired audio output format which is set up at construction.
- // It is required to first acquire the native sample rate of the selected
- // output device and then use the same rate when creating this object.
- WAVEFORMATPCMEX output_format_;
-
- // Contains the native audio input format which is set up at construction
- // if varispeed mode is utilized.
- WAVEFORMATPCMEX input_format_;
-
- // True when successfully opened.
- bool opened_;
-
- // Volume level from 0 to 1 used for output scaling.
- double volume_;
-
- // Size in audio frames of each audio packet where an audio packet
- // is defined as the block of data which the destination is expected to
- // receive in each OnMoreIOData() callback.
- size_t output_buffer_size_frames_;
-
- // Size in audio frames of each audio packet where an audio packet
- // is defined as the block of data which the source is expected to
- // deliver in each OnMoreIOData() callback.
- size_t input_buffer_size_frames_;
-
-  // Lengths of the audio endpoint buffers.
- uint32 endpoint_render_buffer_size_frames_;
- uint32 endpoint_capture_buffer_size_frames_;
-
- // Counts the number of audio frames written to the endpoint buffer.
- uint64 num_written_frames_;
-
- // Time stamp for last delay measurement.
- base::TimeTicks last_delay_sample_time_;
-
- // Contains the total (sum of render and capture) delay in milliseconds.
- double total_delay_ms_;
-
- // Contains the total (sum of render and capture and possibly FIFO) delay
- // in bytes. The update frequency is set by a constant called
- // |kTimeDiffInMillisecondsBetweenDelayMeasurements|.
- int total_delay_bytes_;
-
- // Pointer to the client that will deliver audio samples to be played out.
- AudioSourceCallback* source_;
-
- // IMMDevice interfaces which represents audio endpoint devices.
- base::win::ScopedComPtr<IMMDevice> endpoint_render_device_;
- base::win::ScopedComPtr<IMMDevice> endpoint_capture_device_;
-
-  // IAudioClient interfaces which enable a client to create and initialize
- // an audio stream between an audio application and the audio engine.
- base::win::ScopedComPtr<IAudioClient> audio_output_client_;
- base::win::ScopedComPtr<IAudioClient> audio_input_client_;
-
-  // The IAudioRenderClient interface enables a client to write output
- // data to a rendering endpoint buffer.
- base::win::ScopedComPtr<IAudioRenderClient> audio_render_client_;
-
-  // The IAudioCaptureClient interface enables a client to read input
- // data from a capturing endpoint buffer.
- base::win::ScopedComPtr<IAudioCaptureClient> audio_capture_client_;
-
- // The audio engine will signal this event each time a buffer has been
- // recorded.
- base::win::ScopedHandle capture_event_;
-
- // The audio engine will signal this event each time it needs a new
- // audio buffer to play out.
- // Only utilized in varispeed mode.
- base::win::ScopedHandle render_event_;
-
- // This event will be signaled when streaming shall stop.
- base::win::ScopedHandle stop_streaming_event_;
-
- // Container for retrieving data from AudioSourceCallback::OnMoreIOData().
- scoped_ptr<AudioBus> output_bus_;
-
- // Container for sending data to AudioSourceCallback::OnMoreIOData().
- scoped_ptr<AudioBus> input_bus_;
-
- // Container for storing output from the channel mixer.
- scoped_ptr<AudioBus> channel_bus_;
-
- // All members below are only allocated, or used, in varispeed mode:
-
- // Temporary storage of resampled input audio data.
- scoped_ptr<AudioBus> resampled_bus_;
-
-  // Set to true the first time a capture event has been received in varispeed
- // mode.
- bool input_callback_received_;
-
-  // MultiChannelResampler is a multi-channel wrapper for SincResampler,
-  // allowing high-quality sample rate conversion of multiple channels at once.
- scoped_ptr<MultiChannelResampler> resampler_;
-
- // Resampler I/O ratio.
- double io_sample_rate_ratio_;
-
- // Used for input to output buffering.
- scoped_ptr<AudioFifo> fifo_;
-
-  // The channel mixer is only created and utilized if the number of input
-  // channels is larger than the native number of input channels (e.g., the
-  // client wants stereo but the audio device only supports mono).
- scoped_ptr<ChannelMixer> channel_mixer_;
-
- // The optimal number of frames we'd like to keep in the FIFO at all times.
- int target_fifo_frames_;
-
- // A running average of the measured delta between actual number of frames
- // in the FIFO versus |target_fifo_frames_|.
- double average_delta_;
-
- // A varispeed rate scalar which is calculated based on FIFO drift.
- double fifo_rate_compensation_;
-
- // Set to true when input side signals output side that a new delay
- // estimate is needed.
- bool update_output_delay_;
-
- // Capture side stores its delay estimate so the sum can be derived in
- // the render side.
- double capture_delay_ms_;
-
- // TODO(henrika): possibly remove these members once the performance is
- // properly tuned. Only used for off-line debugging.
-#ifndef NDEBUG
- enum LogElementNames {
- INPUT_TIME_STAMP,
- NUM_FRAMES_IN_FIFO,
- RESAMPLER_MARGIN,
- RATE_COMPENSATION
- };
-
- scoped_ptr<int64[]> input_time_stamps_;
- scoped_ptr<int[]> num_frames_in_fifo_;
- scoped_ptr<int[]> resampler_margin_;
- scoped_ptr<double[]> fifo_rate_comps_;
- scoped_ptr<int[]> num_elements_;
- scoped_ptr<int[]> input_params_;
- scoped_ptr<int[]> output_params_;
-
- FILE* data_file_;
- FILE* param_file_;
-#endif
-
- DISALLOW_COPY_AND_ASSIGN(WASAPIUnifiedStream);
-};
-
-} // namespace media
-
-#endif // MEDIA_AUDIO_WIN_AUDIO_UNIFIED_WIN_H_
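To make the mode discussion in the header comment concrete: varispeed (modes 2 and 3) is engaged exactly when a FIFO and resampler get allocated, which happens when the two sides disagree on sample rate or native buffer size. A hedged sketch of that decision follows; the helper below is hypothetical, the real check lives in WASAPIUnifiedStream::Open()/VarispeedMode().

// Hypothetical helper mirroring the mode selection described above; the real
// decision is made inside WASAPIUnifiedStream::Open() (not shown in this hunk).
struct IoRates {
  int input_sample_rate;
  int output_sample_rate;
  int input_buffer_frames;
  int output_buffer_frames;
};

// Mode (1): rates and buffer sizes match -> input events drive output directly.
// Modes (2)/(3): any mismatch -> allocate FIFO + resampler ("varispeed").
bool NeedsVarispeed(const IoRates& io) {
  return io.input_sample_rate != io.output_sample_rate ||
         io.input_buffer_frames != io.output_buffer_frames;
}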
diff --git a/chromium/media/audio/win/audio_unified_win_unittest.cc b/chromium/media/audio/win/audio_unified_win_unittest.cc
deleted file mode 100644
index 15573aec76a..00000000000
--- a/chromium/media/audio/win/audio_unified_win_unittest.cc
+++ /dev/null
@@ -1,366 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/basictypes.h"
-#include "base/command_line.h"
-#include "base/file_util.h"
-#include "base/message_loop/message_loop.h"
-#include "base/path_service.h"
-#include "base/test/test_timeouts.h"
-#include "base/time/time.h"
-#include "base/win/scoped_com_initializer.h"
-#include "media/audio/audio_io.h"
-#include "media/audio/audio_manager.h"
-#include "media/audio/win/audio_unified_win.h"
-#include "media/audio/win/core_audio_util_win.h"
-#include "media/base/channel_mixer.h"
-#include "media/base/media_switches.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-using ::testing::_;
-using ::testing::AtLeast;
-using ::testing::Between;
-using ::testing::DoAll;
-using ::testing::NotNull;
-using ::testing::Return;
-using base::win::ScopedCOMInitializer;
-
-namespace media {
-
-static const size_t kMaxDeltaSamples = 1000;
-static const char kDeltaTimeMsFileName[] = "unified_delta_times_ms.txt";
-
-// Verify that the delay estimate in the OnMoreIOData() callback is larger
-// than an expected minimum value.
-MATCHER_P(DelayGreaterThan, value, "") {
- return (arg.hardware_delay_bytes > value.hardware_delay_bytes);
-}
-
-// Used to terminate a loop from a different thread than the loop belongs to.
-// |loop| should be a MessageLoopProxy.
-ACTION_P(QuitLoop, loop) {
- loop->PostTask(FROM_HERE, base::MessageLoop::QuitClosure());
-}
-
-class MockUnifiedSourceCallback
- : public AudioOutputStream::AudioSourceCallback {
- public:
- MOCK_METHOD2(OnMoreData, int(AudioBus* audio_bus,
- AudioBuffersState buffers_state));
- MOCK_METHOD3(OnMoreIOData, int(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state));
- MOCK_METHOD1(OnError, void(AudioOutputStream* stream));
-};
-
-// AudioOutputStream::AudioSourceCallback implementation which enables audio
-// play-through. It also creates a text file that contains times between two
-// successive callbacks. Units are in milliseconds. This file can be used for
-// off-line analysis of the callback sequence.
-class UnifiedSourceCallback : public AudioOutputStream::AudioSourceCallback {
- public:
- explicit UnifiedSourceCallback()
- : previous_call_time_(base::TimeTicks::Now()),
- text_file_(NULL),
- elements_to_write_(0) {
- delta_times_.reset(new int[kMaxDeltaSamples]);
- }
-
- virtual ~UnifiedSourceCallback() {
- base::FilePath file_name;
- EXPECT_TRUE(PathService::Get(base::DIR_EXE, &file_name));
- file_name = file_name.AppendASCII(kDeltaTimeMsFileName);
-
- EXPECT_TRUE(!text_file_);
- text_file_ = base::OpenFile(file_name, "wt");
- DLOG_IF(ERROR, !text_file_) << "Failed to open log file.";
- VLOG(0) << ">> Output file " << file_name.value() << " has been created.";
-
- // Write the array which contains delta times to a text file.
- size_t elements_written = 0;
- while (elements_written < elements_to_write_) {
- fprintf(text_file_, "%d\n", delta_times_[elements_written]);
- ++elements_written;
- }
- base::CloseFile(text_file_);
- }
-
- virtual int OnMoreData(AudioBus* dest,
- AudioBuffersState buffers_state) {
- NOTREACHED();
- return 0;
- };
-
- virtual int OnMoreIOData(AudioBus* source,
- AudioBus* dest,
- AudioBuffersState buffers_state) {
- // Store time between this callback and the previous callback.
- const base::TimeTicks now_time = base::TimeTicks::Now();
- const int diff = (now_time - previous_call_time_).InMilliseconds();
- previous_call_time_ = now_time;
- if (elements_to_write_ < kMaxDeltaSamples) {
- delta_times_[elements_to_write_] = diff;
- ++elements_to_write_;
- }
-
-    // Play out the recorded audio samples in loopback. Perform channel mixing
- // if required using a channel mixer which is created only if needed.
- if (source->channels() == dest->channels()) {
- source->CopyTo(dest);
- } else {
- // A channel mixer is required for converting audio between two different
- // channel layouts.
- if (!channel_mixer_) {
- // Guessing the channel layout will work OK for this unit test.
- // Main thing is that the number of channels is correct.
- ChannelLayout input_layout = GuessChannelLayout(source->channels());
- ChannelLayout output_layout = GuessChannelLayout(dest->channels());
- channel_mixer_.reset(new ChannelMixer(input_layout, output_layout));
- DVLOG(1) << "Remixing channel layout from " << input_layout
- << " to " << output_layout << "; from "
- << source->channels() << " channels to "
- << dest->channels() << " channels.";
- }
- if (channel_mixer_)
- channel_mixer_->Transform(source, dest);
- }
- return source->frames();
- };
-
- virtual void OnError(AudioOutputStream* stream) {
- NOTREACHED();
- }
-
- private:
- base::TimeTicks previous_call_time_;
- scoped_ptr<int[]> delta_times_;
- FILE* text_file_;
- size_t elements_to_write_;
- scoped_ptr<ChannelMixer> channel_mixer_;
-};
-
-// Convenience method which ensures that we fulfill all required conditions
-// to run unified audio tests on Windows.
-static bool CanRunUnifiedAudioTests(AudioManager* audio_man) {
- if (!CoreAudioUtil::IsSupported()) {
-    LOG(WARNING) << "This test requires Windows Vista or higher.";
- return false;
- }
-
- if (!audio_man->HasAudioOutputDevices()) {
- LOG(WARNING) << "No output devices detected.";
- return false;
- }
-
- if (!audio_man->HasAudioInputDevices()) {
- LOG(WARNING) << "No input devices detected.";
- return false;
- }
-
- return true;
-}
-
-// Convenience class which simplifies creation of a unified AudioOutputStream
-// object.
-class AudioUnifiedStreamWrapper {
- public:
- explicit AudioUnifiedStreamWrapper(AudioManager* audio_manager)
- : com_init_(ScopedCOMInitializer::kMTA),
- audio_man_(audio_manager) {
-    // We open up both sides (input and output) using the preferred
-    // set of audio parameters. These parameters correspond to the mix format
- // that the audio engine uses internally for processing of shared-mode
- // output streams.
- AudioParameters out_params;
- EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetPreferredAudioParameters(
- eRender, eConsole, &out_params)));
-
- // WebAudio is the only real user of unified audio and it always asks
- // for stereo.
- // TODO(henrika): extend support to other input channel layouts as well.
- const int kInputChannels = 2;
-
- params_.Reset(out_params.format(),
- out_params.channel_layout(),
- out_params.channels(),
- kInputChannels,
- out_params.sample_rate(),
- out_params.bits_per_sample(),
- out_params.frames_per_buffer());
- }
-
- ~AudioUnifiedStreamWrapper() {}
-
- // Creates an AudioOutputStream object using default parameters.
- WASAPIUnifiedStream* Create() {
- return static_cast<WASAPIUnifiedStream*>(CreateOutputStream());
- }
-
- // Creates an AudioOutputStream object using default parameters but a
- // specified input device.
- WASAPIUnifiedStream* Create(const std::string device_id) {
- return static_cast<WASAPIUnifiedStream*>(CreateOutputStream(device_id));
- }
-
- AudioParameters::Format format() const { return params_.format(); }
- int channels() const { return params_.channels(); }
- int bits_per_sample() const { return params_.bits_per_sample(); }
- int sample_rate() const { return params_.sample_rate(); }
- int frames_per_buffer() const { return params_.frames_per_buffer(); }
- int bytes_per_buffer() const { return params_.GetBytesPerBuffer(); }
- int input_channels() const { return params_.input_channels(); }
-
- private:
- AudioOutputStream* CreateOutputStream() {
- // Get the unique device ID of the default capture device instead of using
- // AudioManagerBase::kDefaultDeviceId since it provides slightly better
-    // test coverage and will utilize the same code path as if a non-default
- // input device was used.
- ScopedComPtr<IMMDevice> audio_device =
- CoreAudioUtil::CreateDefaultDevice(eCapture, eConsole);
- AudioDeviceName name;
- EXPECT_TRUE(SUCCEEDED(CoreAudioUtil::GetDeviceName(audio_device, &name)));
- const std::string& input_device_id = name.unique_id;
- EXPECT_TRUE(CoreAudioUtil::DeviceIsDefault(eCapture, eConsole,
- input_device_id));
-
- // Create the unified audio I/O stream using the default input device.
- AudioOutputStream* aos = audio_man_->MakeAudioOutputStream(params_,
- "", input_device_id);
- EXPECT_TRUE(aos);
- return aos;
- }
-
- AudioOutputStream* CreateOutputStream(const std::string& input_device_id) {
- // Create the unified audio I/O stream using the specified input device.
- AudioOutputStream* aos = audio_man_->MakeAudioOutputStream(params_,
- "", input_device_id);
- EXPECT_TRUE(aos);
- return aos;
- }
-
- ScopedCOMInitializer com_init_;
- AudioManager* audio_man_;
- AudioParameters params_;
-};
-
-// Convenience method which creates a default WASAPIUnifiedStream object.
-static WASAPIUnifiedStream* CreateDefaultUnifiedStream(
- AudioManager* audio_manager) {
- AudioUnifiedStreamWrapper aosw(audio_manager);
- return aosw.Create();
-}
-
-// Convenience method which creates a default WASAPIUnifiedStream object but
-// with a specified audio input device.
-static WASAPIUnifiedStream* CreateDefaultUnifiedStream(
- AudioManager* audio_manager, const std::string& device_id) {
- AudioUnifiedStreamWrapper aosw(audio_manager);
- return aosw.Create(device_id);
-}
-
-// Test Open(), Close() calling sequence.
-TEST(WASAPIUnifiedStreamTest, OpenAndClose) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
- if (!CanRunUnifiedAudioTests(audio_manager.get()))
- return;
-
- WASAPIUnifiedStream* wus = CreateDefaultUnifiedStream(audio_manager.get());
- EXPECT_TRUE(wus->Open());
- wus->Close();
-}
-
-// Test Open(), Close() calling sequence for all available capture devices.
-TEST(WASAPIUnifiedStreamTest, OpenAndCloseForAllInputDevices) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
- if (!CanRunUnifiedAudioTests(audio_manager.get()))
- return;
-
- AudioDeviceNames device_names;
- audio_manager->GetAudioInputDeviceNames(&device_names);
- for (AudioDeviceNames::iterator i = device_names.begin();
- i != device_names.end(); ++i) {
- WASAPIUnifiedStream* wus = CreateDefaultUnifiedStream(
- audio_manager.get(), i->unique_id);
- EXPECT_TRUE(wus->Open());
- wus->Close();
- }
-}
-
-// Test Open(), Start(), Close() calling sequence.
-TEST(WASAPIUnifiedStreamTest, OpenStartAndClose) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
- if (!CanRunUnifiedAudioTests(audio_manager.get()))
- return;
-
- MockUnifiedSourceCallback source;
- AudioUnifiedStreamWrapper ausw(audio_manager.get());
- WASAPIUnifiedStream* wus = ausw.Create();
-
- EXPECT_TRUE(wus->Open());
- EXPECT_CALL(source, OnError(wus))
- .Times(0);
- EXPECT_CALL(source, OnMoreIOData(NotNull(), NotNull(), _))
- .Times(Between(0, 1))
- .WillOnce(Return(ausw.frames_per_buffer()));
- wus->Start(&source);
- wus->Close();
-}
-
-// Verify that IO callbacks start as they should.
-TEST(WASAPIUnifiedStreamTest, StartLoopbackAudio) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
- if (!CanRunUnifiedAudioTests(audio_manager.get()))
- return;
-
- base::MessageLoopForUI loop;
- MockUnifiedSourceCallback source;
- AudioUnifiedStreamWrapper ausw(audio_manager.get());
- WASAPIUnifiedStream* wus = ausw.Create();
-
-  // Set up expected minimum delay estimation where we use a minimum delay
- // which is equal to the sum of render and capture sizes. We can never
- // reach a delay lower than this value.
- AudioBuffersState min_total_audio_delay(0, 2 * ausw.bytes_per_buffer());
-
- EXPECT_TRUE(wus->Open());
- EXPECT_CALL(source, OnError(wus))
- .Times(0);
- EXPECT_CALL(source, OnMoreIOData(
- NotNull(), NotNull(), DelayGreaterThan(min_total_audio_delay)))
- .Times(AtLeast(2))
- .WillOnce(Return(ausw.frames_per_buffer()))
- .WillOnce(DoAll(
- QuitLoop(loop.message_loop_proxy()),
- Return(ausw.frames_per_buffer())));
- wus->Start(&source);
- loop.PostDelayedTask(FROM_HERE, base::MessageLoop::QuitClosure(),
- TestTimeouts::action_timeout());
- loop.Run();
- wus->Stop();
- wus->Close();
-}
-
-// Perform a real-time test in loopback where the recorded audio is echoed
-// back to the speaker. This test allows the user to verify that the audio
-// sounds OK. A text file with name |kDeltaTimeMsFileName| is also generated.
-TEST(WASAPIUnifiedStreamTest, DISABLED_RealTimePlayThrough) {
- scoped_ptr<AudioManager> audio_manager(AudioManager::CreateForTesting());
- if (!CanRunUnifiedAudioTests(audio_manager.get()))
- return;
-
- base::MessageLoopForUI loop;
- UnifiedSourceCallback source;
- WASAPIUnifiedStream* wus = CreateDefaultUnifiedStream(audio_manager.get());
-
- EXPECT_TRUE(wus->Open());
- wus->Start(&source);
- loop.PostDelayedTask(FROM_HERE, base::MessageLoop::QuitClosure(),
- base::TimeDelta::FromMilliseconds(10000));
- loop.Run();
- wus->Close();
-}
-
-} // namespace media
diff --git a/chromium/media/audio/win/core_audio_util_win.cc b/chromium/media/audio/win/core_audio_util_win.cc
index 790b2b140f7..71e8d717f62 100644
--- a/chromium/media/audio/win/core_audio_util_win.cc
+++ b/chromium/media/audio/win/core_audio_util_win.cc
@@ -146,7 +146,7 @@ static std::string GetDeviceID(IMMDevice* device) {
ScopedCoMem<WCHAR> device_id_com;
std::string device_id;
if (SUCCEEDED(device->GetId(&device_id_com)))
- WideToUTF8(device_id_com, wcslen(device_id_com), &device_id);
+ base::WideToUTF8(device_id_com, wcslen(device_id_com), &device_id);
return device_id;
}
@@ -154,7 +154,7 @@ bool CoreAudioUtil::IsSupported() {
// It is possible to force usage of WaveXxx APIs by using a command line flag.
const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
if (cmd_line->HasSwitch(switches::kForceWaveAudio)) {
- LOG(WARNING) << "Forcing usage of Windows WaveXxx APIs";
+ DVLOG(1) << "Forcing usage of Windows WaveXxx APIs";
return false;
}
@@ -230,6 +230,18 @@ ScopedComPtr<IMMDeviceEnumerator> CoreAudioUtil::CreateDeviceEnumerator() {
ScopedComPtr<IMMDeviceEnumerator> device_enumerator;
HRESULT hr = device_enumerator.CreateInstance(__uuidof(MMDeviceEnumerator),
NULL, CLSCTX_INPROC_SERVER);
+ if (hr == CO_E_NOTINITIALIZED) {
+ LOG(ERROR) << "CoCreateInstance fails with CO_E_NOTINITIALIZED";
+    // We have seen crashes which indicate that this method can in fact
+ // fail with CO_E_NOTINITIALIZED in combination with certain 3rd party
+ // modules. Calling CoInitializeEx is an attempt to resolve the reported
+ // issues. See http://crbug.com/378465 for details.
+ hr = CoInitializeEx(NULL, COINIT_MULTITHREADED);
+ if (SUCCEEDED(hr)) {
+ hr = device_enumerator.CreateInstance(__uuidof(MMDeviceEnumerator),
+ NULL, CLSCTX_INPROC_SERVER);
+ }
+ }
CHECK(SUCCEEDED(hr));
return device_enumerator;
}
@@ -288,8 +300,8 @@ ScopedComPtr<IMMDevice> CoreAudioUtil::CreateDevice(
// Retrieve an audio device specified by an endpoint device-identification
// string.
- HRESULT hr = device_enumerator->GetDevice(UTF8ToUTF16(device_id).c_str(),
- endpoint_device.Receive());
+ HRESULT hr = device_enumerator->GetDevice(
+ base::UTF8ToUTF16(device_id).c_str(), endpoint_device.Receive());
DVLOG_IF(1, FAILED(hr)) << "IMMDeviceEnumerator::GetDevice: "
<< std::hex << hr;
return endpoint_device;
@@ -316,9 +328,9 @@ HRESULT CoreAudioUtil::GetDeviceName(IMMDevice* device, AudioDeviceName* name) {
if (FAILED(hr))
return hr;
if (friendly_name.get().vt == VT_LPWSTR && friendly_name.get().pwszVal) {
- WideToUTF8(friendly_name.get().pwszVal,
- wcslen(friendly_name.get().pwszVal),
- &device_name.device_name);
+ base::WideToUTF8(friendly_name.get().pwszVal,
+ wcslen(friendly_name.get().pwszVal),
+ &device_name.device_name);
}
*name = device_name;
@@ -367,9 +379,9 @@ std::string CoreAudioUtil::GetAudioControllerID(IMMDevice* device,
}
std::string controller_id;
- WideToUTF8(instance_id.get().pwszVal,
- wcslen(instance_id.get().pwszVal),
- &controller_id);
+ base::WideToUTF8(instance_id.get().pwszVal,
+ wcslen(instance_id.get().pwszVal),
+ &controller_id);
return controller_id;
}
@@ -672,7 +684,20 @@ HRESULT CoreAudioUtil::GetPreferredAudioParameters(
// actual error code. The exact value is not important here.
return AUDCLNT_E_ENDPOINT_CREATE_FAILED;
}
- return GetPreferredAudioParameters(client, params);
+
+ HRESULT hr = GetPreferredAudioParameters(client, params);
+ if (FAILED(hr))
+ return hr;
+
+ if (role == eCommunications) {
+ // Raise the 'DUCKING' flag for default communication devices.
+ *params = AudioParameters(params->format(), params->channel_layout(),
+ params->channels(), params->input_channels(), params->sample_rate(),
+ params->bits_per_sample(), params->frames_per_buffer(),
+ params->effects() | AudioParameters::DUCKING);
+ }
+
+ return hr;
}
HRESULT CoreAudioUtil::GetPreferredAudioParameters(
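A small usage sketch of the DUCKING flag added in the hunk above; only AudioParameters::effects() and AudioParameters::DUCKING are taken from this patch, the helper itself is hypothetical.

#include "media/audio/audio_parameters.h"

bool IsDuckingEnabled(const media::AudioParameters& params) {
  // DUCKING is OR:ed into effects() for default communication devices above.
  return (params.effects() & media::AudioParameters::DUCKING) != 0;
}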
diff --git a/chromium/media/audio/win/core_audio_util_win.h b/chromium/media/audio/win/core_audio_util_win.h
index a210af906ea..8727f97b51c 100644
--- a/chromium/media/audio/win/core_audio_util_win.h
+++ b/chromium/media/audio/win/core_audio_util_win.h
@@ -4,7 +4,7 @@
// Utility methods for the Core Audio API on Windows.
// Always ensure that Core Audio is supported before using these methods.
-// Use media::CoreAudioIsSupported() for this purpose.
+// Use media::CoreAudioUtil::IsSupported() for this purpose.
// Also, all methods must be called on a valid COM thread. This can be done
// by using the base::win::ScopedCOMInitializer helper class.
@@ -37,6 +37,8 @@ class MEDIA_EXPORT CoreAudioUtil {
// Returns true if Windows Core Audio is supported.
// Always verify that this method returns true before using any of the
// methods in this class.
+ // WARNING: This function must be called once from the main thread before
+ // it is safe to call from other threads.
static bool IsSupported();
// Converts between reference time to base::TimeDelta.
diff --git a/chromium/media/audio/win/core_audio_util_win_unittest.cc b/chromium/media/audio/win/core_audio_util_win_unittest.cc
index e9ed0c4f597..f18878cb06a 100644
--- a/chromium/media/audio/win/core_audio_util_win_unittest.cc
+++ b/chromium/media/audio/win/core_audio_util_win_unittest.cc
@@ -504,7 +504,7 @@ TEST_F(CoreAudioUtilWinTest, GetMatchingOutputDeviceID) {
base::win::ScopedCoMem<WCHAR> wide_id;
device->GetId(&wide_id);
std::string id;
- WideToUTF8(wide_id, wcslen(wide_id), &id);
+ base::WideToUTF8(wide_id, wcslen(wide_id), &id);
found_a_pair = !CoreAudioUtil::GetMatchingOutputDeviceID(id).empty();
}
diff --git a/chromium/media/audio/win/device_enumeration_win.cc b/chromium/media/audio/win/device_enumeration_win.cc
index aa66afb12b1..1beddbbe939 100644
--- a/chromium/media/audio/win/device_enumeration_win.cc
+++ b/chromium/media/audio/win/device_enumeration_win.cc
@@ -68,7 +68,8 @@ static bool GetDeviceNamesWinImpl(EDataFlow data_flow,
// Store the unique name.
ScopedCoMem<WCHAR> endpoint_device_id;
audio_device->GetId(&endpoint_device_id);
- device.unique_id = WideToUTF8(static_cast<WCHAR*>(endpoint_device_id));
+ device.unique_id =
+ base::WideToUTF8(static_cast<WCHAR*>(endpoint_device_id));
// Retrieve user-friendly name of endpoint device.
// Example: "Microphone (Realtek High Definition Audio)".
@@ -82,7 +83,7 @@ static bool GetDeviceNamesWinImpl(EDataFlow data_flow,
// Store the user-friendly name.
if (SUCCEEDED(hr) &&
friendly_name.get().vt == VT_LPWSTR && friendly_name.get().pwszVal) {
- device.device_name = WideToUTF8(friendly_name.get().pwszVal);
+ device.device_name = base::WideToUTF8(friendly_name.get().pwszVal);
}
}
@@ -124,7 +125,7 @@ static bool GetDeviceNamesWinXPImpl(AudioDeviceNames* device_names) {
// Store the user-friendly name. Max length is MAXPNAMELEN(=32)
    // characters and the name can be truncated on XP.
// Example: "Microphone (Realtek High Defini".
- device.device_name = WideToUTF8(capabilities.szPname);
+ device.device_name = base::WideToUTF8(capabilities.szPname);
// Store the "unique" name (we use same as friendly name on Windows XP).
device.unique_id = device.device_name;
@@ -181,7 +182,7 @@ std::string ConvertToWinXPInputDeviceId(const std::string& device_id) {
if (result != MMSYSERR_NOERROR)
continue;
- std::string utf8_id = WideToUTF8(static_cast<WCHAR*>(id));
+ std::string utf8_id = base::WideToUTF8(static_cast<WCHAR*>(id));
// Check whether the endpoint ID string of this waveIn device matches that
// of the audio endpoint device.
if (device_id == utf8_id)
@@ -195,7 +196,7 @@ std::string ConvertToWinXPInputDeviceId(const std::string& device_id) {
result = waveInGetDevCaps(i, &capabilities, sizeof(capabilities));
if (result == MMSYSERR_NOERROR)
- return WideToUTF8(capabilities.szPname);
+ return base::WideToUTF8(capabilities.szPname);
}
return std::string();
diff --git a/chromium/media/audio/win/wavein_input_win.cc b/chromium/media/audio/win/wavein_input_win.cc
index 05771250e01..f12bcf244c5 100644
--- a/chromium/media/audio/win/wavein_input_win.cc
+++ b/chromium/media/audio/win/wavein_input_win.cc
@@ -10,6 +10,7 @@
#include "media/audio/audio_io.h"
#include "media/audio/win/audio_manager_win.h"
#include "media/audio/win/device_enumeration_win.h"
+#include "media/base/audio_bus.h"
namespace media {
@@ -20,7 +21,9 @@ static WAVEHDR* GetNextBuffer(WAVEHDR* current) {
}
PCMWaveInAudioInputStream::PCMWaveInAudioInputStream(
- AudioManagerWin* manager, const AudioParameters& params, int num_buffers,
+ AudioManagerWin* manager,
+ const AudioParameters& params,
+ int num_buffers,
const std::string& device_id)
: state_(kStateEmpty),
manager_(manager),
@@ -29,7 +32,8 @@ PCMWaveInAudioInputStream::PCMWaveInAudioInputStream(
callback_(NULL),
num_buffers_(num_buffers),
buffer_(NULL),
- channels_(params.channels()) {
+ channels_(params.channels()),
+ audio_bus_(media::AudioBus::Create(params)) {
DCHECK_GT(num_buffers_, 0);
format_.wFormatTag = WAVE_FORMAT_PCM;
format_.nChannels = params.channels() > 2 ? 2 : params.channels();
@@ -224,7 +228,8 @@ bool PCMWaveInAudioInputStream::GetAutomaticGainControl() {
void PCMWaveInAudioInputStream::HandleError(MMRESULT error) {
DLOG(WARNING) << "PCMWaveInAudio error " << error;
- callback_->OnError(this);
+ if (callback_)
+ callback_->OnError(this);
}
void PCMWaveInAudioInputStream::QueueNextPacket(WAVEHDR *buffer) {
@@ -289,11 +294,11 @@ void PCMWaveInAudioInputStream::WaveCallback(HWAVEIN hwi, UINT msg,
// there is currently no support for controlling the microphone volume
// level.
WAVEHDR* buffer = reinterpret_cast<WAVEHDR*>(param1);
- obj->callback_->OnData(obj,
- reinterpret_cast<const uint8*>(buffer->lpData),
- buffer->dwBytesRecorded,
- buffer->dwBytesRecorded,
- 0.0);
+ obj->audio_bus_->FromInterleaved(reinterpret_cast<uint8*>(buffer->lpData),
+ obj->audio_bus_->frames(),
+ obj->format_.wBitsPerSample / 8);
+ obj->callback_->OnData(
+ obj, obj->audio_bus_.get(), buffer->dwBytesRecorded, 0.0);
// Queue the finished buffer back with the audio driver. Since we are
// reusing the same buffers we can get away without calling
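The wavein hunk above replaces raw interleaved byte delivery with a deinterleaved AudioBus. A rough sketch of that conversion, assuming 16-bit PCM; the helper and its parameters are illustrative, not part of the patch.

#include "base/basictypes.h"
#include "base/memory/scoped_ptr.h"
#include "media/base/audio_bus.h"

void DeliverAsAudioBus(const uint8* interleaved_pcm16,
                       int frames,
                       int channels) {
  scoped_ptr<media::AudioBus> bus(media::AudioBus::Create(channels, frames));
  // FromInterleaved() deinterleaves the 16-bit samples (2 bytes per sample)
  // into the bus's per-channel float storage.
  bus->FromInterleaved(interleaved_pcm16, frames, 2);
  // The stream would now hand |bus| to AudioInputCallback::OnData(), as the
  // patched WaveCallback() does with |audio_bus_|.
}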
diff --git a/chromium/media/audio/win/wavein_input_win.h b/chromium/media/audio/win/wavein_input_win.h
index df5ce4d129b..5b1edd59fb3 100644
--- a/chromium/media/audio/win/wavein_input_win.h
+++ b/chromium/media/audio/win/wavein_input_win.h
@@ -20,6 +20,7 @@
namespace media {
+class AudioBus;
class AudioManagerWin;
class PCMWaveInAudioInputStream : public AudioInputStream {
@@ -123,6 +124,10 @@ class PCMWaveInAudioInputStream : public AudioInputStream {
// Lock used to avoid conflicts when Stop() is called during a callback.
base::Lock lock_;
+ // Extra audio bus used for storage of deinterleaved data for the OnData
+ // callback.
+ scoped_ptr<media::AudioBus> audio_bus_;
+
DISALLOW_COPY_AND_ASSIGN(PCMWaveInAudioInputStream);
};