Diffstat (limited to 'src/3rdparty/resonance-audio/resonance_audio/graph')
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/ambisonic_binaural_decoder_node.cc  111
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/ambisonic_binaural_decoder_node.h  88
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/ambisonic_mixing_encoder_node.cc  69
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/ambisonic_mixing_encoder_node.h  71
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/ambisonic_mixing_encoder_node_test.cc  150
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/binaural_surround_renderer_impl.cc  495
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/binaural_surround_renderer_impl.h  190
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/binaural_surround_renderer_impl_test.cc  202
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/buffered_source_node.cc  46
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/buffered_source_node.h  62
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/foa_rotator_node.cc  67
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/foa_rotator_node.h  54
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/gain_mixer_node.cc  66
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/gain_mixer_node.h  68
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/gain_mixer_node_test.cc  246
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/gain_node.cc  82
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/gain_node.h  69
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/gain_node_test.cc  138
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/graph_manager.cc  290
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/graph_manager.h  404
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/graph_manager_config.h  40
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/hoa_rotator_node.cc  69
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/hoa_rotator_node.h  55
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/mixer_node.cc  57
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/mixer_node.h  54
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/mixer_node_test.cc  112
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/mono_from_soundfield_node.cc  45
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/mono_from_soundfield_node.h  44
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/near_field_effect_node.cc  117
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/near_field_effect_node.h  69
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/near_field_effect_node_test.cc  127
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/occlusion_node.cc  101
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/occlusion_node.h  59
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/occlusion_node_test.cc  186
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/reflections_node.cc  113
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/reflections_node.h  78
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/resonance_audio_api_impl.cc  586
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/resonance_audio_api_impl.h  175
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/reverb_node.cc  162
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/reverb_node.h  99
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/source_graph_config.h  43
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/source_parameters_manager.cc  58
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/source_parameters_manager.h  68
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/source_parameters_manager_test.cc  91
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/stereo_mixing_panner_node.cc  65
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/stereo_mixing_panner_node.h  61
-rw-r--r--  src/3rdparty/resonance-audio/resonance_audio/graph/system_settings.h  189
47 files changed, 5891 insertions, 0 deletions
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/ambisonic_binaural_decoder_node.cc b/src/3rdparty/resonance-audio/resonance_audio/graph/ambisonic_binaural_decoder_node.cc
new file mode 100644
index 000000000..a3c8ff0cb
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/ambisonic_binaural_decoder_node.cc
@@ -0,0 +1,111 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "graph/ambisonic_binaural_decoder_node.h"
+
+#include "ambisonics/stereo_from_soundfield_converter.h"
+#include "ambisonics/utils.h"
+#include "base/constants_and_types.h"
+
+#include "dsp/sh_hrir_creator.h"
+
+namespace vraudio {
+
+AmbisonicBinauralDecoderNode::AmbisonicBinauralDecoderNode(
+ const SystemSettings& system_settings, int ambisonic_order,
+ const std::string& sh_hrir_filename, FftManager* fft_manager,
+ Resampler* resampler)
+ : system_settings_(system_settings),
+ num_ambisonic_channels_(GetNumPeriphonicComponents(ambisonic_order)),
+ is_stereo_speaker_mode_(system_settings_.IsStereoSpeakerModeEnabled()),
+ num_frames_processed_on_empty_input_(
+ system_settings_.GetFramesPerBuffer()),
+ stereo_output_buffer_(kNumStereoChannels,
+ system_settings.GetFramesPerBuffer()),
+ silence_input_buffer_(num_ambisonic_channels_,
+ system_settings.GetFramesPerBuffer()),
+ crossfader_(system_settings_.GetFramesPerBuffer()),
+ crossfaded_output_buffer_(kNumStereoChannels,
+ system_settings.GetFramesPerBuffer()),
+ temp_crossfade_buffer_(kNumStereoChannels,
+ system_settings.GetFramesPerBuffer()) {
+ silence_input_buffer_.Clear();
+ EnableProcessOnEmptyInput(true);
+ std::unique_ptr<AudioBuffer> sh_hrirs = CreateShHrirsFromAssets(
+ sh_hrir_filename, system_settings_.GetSampleRateHz(), resampler);
+ CHECK_EQ(sh_hrirs->num_channels(), num_ambisonic_channels_);
+ ambisonic_binaural_decoder_.reset(new AmbisonicBinauralDecoder(
+ *sh_hrirs, system_settings_.GetFramesPerBuffer(), fft_manager));
+}
+
+AmbisonicBinauralDecoderNode::~AmbisonicBinauralDecoderNode() {}
+
+const AudioBuffer* AmbisonicBinauralDecoderNode::AudioProcess(
+ const NodeInput& input) {
+
+
+ const bool was_stereo_speaker_mode_enabled = is_stereo_speaker_mode_;
+ is_stereo_speaker_mode_ = system_settings_.IsStereoSpeakerModeEnabled();
+
+ const size_t num_frames = system_settings_.GetFramesPerBuffer();
+ const AudioBuffer* input_buffer = input.GetSingleInput();
+ if (input_buffer == nullptr) {
+ if (num_frames_processed_on_empty_input_ < num_frames &&
+ !was_stereo_speaker_mode_enabled) {
+ // If we have no input, generate a silent input buffer until the node
+ // states are cleared.
+ num_frames_processed_on_empty_input_ += num_frames;
+ ambisonic_binaural_decoder_->Process(silence_input_buffer_,
+ &stereo_output_buffer_);
+ return &stereo_output_buffer_;
+ } else {
+ // Skip processing entirely when the states are fully cleared.
+ return nullptr;
+ }
+ }
+
+ num_frames_processed_on_empty_input_ = 0;
+
+ DCHECK_EQ(input_buffer->num_channels(), num_ambisonic_channels_);
+ DCHECK_EQ(input_buffer->num_frames(), num_frames);
+
+ // If stereo speaker mode is enabled, perform M-S stereo decode. Otherwise,
+ // perform binaural decode.
+ if (is_stereo_speaker_mode_) {
+ StereoFromSoundfield(*input_buffer, &stereo_output_buffer_);
+ } else {
+ ambisonic_binaural_decoder_->Process(*input_buffer, &stereo_output_buffer_);
+ }
+
+ if (is_stereo_speaker_mode_ != was_stereo_speaker_mode_enabled) {
+ // Apply linear crossfade between binaural decode and stereo decode outputs.
+ if (was_stereo_speaker_mode_enabled) {
+ StereoFromSoundfield(*input_buffer, &temp_crossfade_buffer_);
+ } else {
+ ambisonic_binaural_decoder_->Process(*input_buffer,
+ &temp_crossfade_buffer_);
+ }
+ crossfader_.ApplyLinearCrossfade(stereo_output_buffer_,
+ temp_crossfade_buffer_,
+ &crossfaded_output_buffer_);
+ return &crossfaded_output_buffer_;
+ }
+
+ // Return the rendered output directly.
+ return &stereo_output_buffer_;
+}
+
+} // namespace vraudio
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/ambisonic_binaural_decoder_node.h b/src/3rdparty/resonance-audio/resonance_audio/graph/ambisonic_binaural_decoder_node.h
new file mode 100644
index 000000000..548d44e01
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/ambisonic_binaural_decoder_node.h
@@ -0,0 +1,88 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#ifndef RESONANCE_AUDIO_GRAPH_AMBISONIC_BINAURAL_DECODER_NODE_H_
+#define RESONANCE_AUDIO_GRAPH_AMBISONIC_BINAURAL_DECODER_NODE_H_
+
+#include <memory>
+#include <string>
+
+#include "ambisonics/ambisonic_binaural_decoder.h"
+#include "base/audio_buffer.h"
+#include "dsp/fft_manager.h"
+#include "dsp/resampler.h"
+#include "graph/system_settings.h"
+#include "node/processing_node.h"
+#include "utils/buffer_crossfader.h"
+
+namespace vraudio {
+
+// Node that takes an ambisonic soundfield as input and renders a binaural
+// stereo buffer as output.
+class AmbisonicBinauralDecoderNode : public ProcessingNode {
+ public:
+ // Initializes AmbisonicBinauralDecoderNode class.
+ //
+ // @param system_settings Global system configuration.
+ // @param ambisonic_order Ambisonic order.
+ // @param sh_hrir_filename Filename to load the HRIR data from.
+ // @param fft_manager Pointer to a manager to perform FFT transformations.
+ // @param resampler Pointer to a resampler used to convert HRIRs to the system rate.
+ AmbisonicBinauralDecoderNode(const SystemSettings& system_settings,
+ int ambisonic_order,
+ const std::string& sh_hrir_filename,
+ FftManager* fft_manager, Resampler* resampler);
+
+ ~AmbisonicBinauralDecoderNode() override;
+
+ protected:
+ // Implements ProcessingNode.
+ const AudioBuffer* AudioProcess(const NodeInput& input) override;
+
+ private:
+ const SystemSettings& system_settings_;
+
+ // Number of Ambisonic channels.
+ const size_t num_ambisonic_channels_;
+
+ // Denotes if the stereo speaker mode is enabled.
+ bool is_stereo_speaker_mode_;
+
+ // Ambisonic decoder used to render binaural output.
+ std::unique_ptr<AmbisonicBinauralDecoder> ambisonic_binaural_decoder_;
+
+ size_t num_frames_processed_on_empty_input_;
+
+ // Stereo output buffer.
+ AudioBuffer stereo_output_buffer_;
+
+ // Silence input buffer used to flush the decoder tails when no input is available.
+ AudioBuffer silence_input_buffer_;
+
+ // Buffer crossfader to apply linear crossfade when the stereo speaker mode is
+ // changed.
+ BufferCrossfader crossfader_;
+
+ // Stereo output buffer to store the crossfaded decode output when necessary.
+ AudioBuffer crossfaded_output_buffer_;
+
+ // Temporary crossfade buffer to store the intermediate stereo output.
+ AudioBuffer temp_crossfade_buffer_;
+};
+
+} // namespace vraudio
+
+#endif // RESONANCE_AUDIO_GRAPH_AMBISONIC_BINAURAL_DECODER_NODE_H_
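
A minimal wiring sketch for the node above (not part of the patch): it assumes |settings|, |fft_manager|, |resampler|, and an upstream node producing first-order ambisonic buffers already exist, and the HRIR asset name is a placeholder.

    auto decoder = std::make_shared<AmbisonicBinauralDecoderNode>(
        settings, /*ambisonic_order=*/1, "sh_hrir_first_order.wav",
        &fft_manager, &resampler);
    decoder->Connect(foa_source_node);         // 4-channel first-order input.
    auto sink = std::make_shared<SinkNode>();
    sink->Connect(decoder);
    const auto& outputs = sink->ReadInputs();  // One stereo AudioBuffer, when available.
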
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/ambisonic_mixing_encoder_node.cc b/src/3rdparty/resonance-audio/resonance_audio/graph/ambisonic_mixing_encoder_node.cc
new file mode 100644
index 000000000..88405464c
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/ambisonic_mixing_encoder_node.cc
@@ -0,0 +1,69 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "graph/ambisonic_mixing_encoder_node.h"
+
+#include "ambisonics/utils.h"
+#include "base/constants_and_types.h"
+#include "base/logging.h"
+
+
+namespace vraudio {
+
+AmbisonicMixingEncoderNode::AmbisonicMixingEncoderNode(
+ const SystemSettings& system_settings,
+ const AmbisonicLookupTable& lookup_table, int ambisonic_order)
+ : system_settings_(system_settings),
+ lookup_table_(lookup_table),
+ ambisonic_order_(ambisonic_order),
+ gain_mixer_(GetNumPeriphonicComponents(ambisonic_order_),
+ system_settings_.GetFramesPerBuffer()),
+ coefficients_(GetNumPeriphonicComponents(ambisonic_order_)) {}
+
+const AudioBuffer* AmbisonicMixingEncoderNode::AudioProcess(
+ const NodeInput& input) {
+
+
+ const WorldPosition& listener_position = system_settings_.GetHeadPosition();
+ const WorldRotation& listener_rotation = system_settings_.GetHeadRotation();
+
+ gain_mixer_.Reset();
+ for (auto& input_buffer : input.GetInputBuffers()) {
+ const int source_id = input_buffer->source_id();
+ const auto source_parameters =
+ system_settings_.GetSourceParameters(source_id);
+ DCHECK_NE(source_id, kInvalidSourceId);
+ DCHECK_EQ(input_buffer->num_channels(), 1U);
+
+ // Compute the relative source direction in spherical angles.
+ const ObjectTransform& source_transform =
+ source_parameters->object_transform;
+ WorldPosition relative_direction;
+ GetRelativeDirection(listener_position, listener_rotation,
+ source_transform.position, &relative_direction);
+ const SphericalAngle source_direction =
+ SphericalAngle::FromWorldPosition(relative_direction);
+
+ lookup_table_.GetEncodingCoeffs(ambisonic_order_, source_direction,
+ source_parameters->spread_deg,
+ &coefficients_);
+
+ gain_mixer_.AddInputChannel((*input_buffer)[0], source_id, coefficients_);
+ }
+ return gain_mixer_.GetOutput();
+}
+
+} // namespace vraudio
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/ambisonic_mixing_encoder_node.h b/src/3rdparty/resonance-audio/resonance_audio/graph/ambisonic_mixing_encoder_node.h
new file mode 100644
index 000000000..865677571
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/ambisonic_mixing_encoder_node.h
@@ -0,0 +1,71 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#ifndef RESONANCE_AUDIO_GRAPH_AMBISONIC_MIXING_ENCODER_NODE_H_
+#define RESONANCE_AUDIO_GRAPH_AMBISONIC_MIXING_ENCODER_NODE_H_
+
+#include <vector>
+
+#include "ambisonics/ambisonic_lookup_table.h"
+#include "base/audio_buffer.h"
+#include "base/spherical_angle.h"
+#include "dsp/gain_mixer.h"
+#include "graph/system_settings.h"
+#include "node/processing_node.h"
+
+namespace vraudio {
+
+// Node that accepts mono sound object buffers as input, encodes them, and
+// mixes them into a single Ambisonic sound field.
+class AmbisonicMixingEncoderNode : public ProcessingNode {
+ public:
+ // Initializes AmbisonicMixingEncoderNode class.
+ //
+ // @param system_settings Global system configuration.
+ // @param lookup_table Ambisonic encoding lookup table.
+ // @param ambisonic_order Order of Ambisonic sources.
+ AmbisonicMixingEncoderNode(const SystemSettings& system_settings,
+ const AmbisonicLookupTable& lookup_table,
+ int ambisonic_order);
+
+ // Node implementation.
+ bool CleanUp() final {
+ CallCleanUpOnInputNodes();
+ // Prevent node from being disconnected when all sources are removed.
+ return false;
+ }
+
+ protected:
+ // Implements ProcessingNode.
+ const AudioBuffer* AudioProcess(const NodeInput& input) override;
+
+ private:
+ const SystemSettings& system_settings_;
+ const AmbisonicLookupTable& lookup_table_;
+
+ // Ambisonic order of encoded sources.
+ const int ambisonic_order_;
+
+ // |GainMixer| instance.
+ GainMixer gain_mixer_;
+
+ // Encoding coefficient values to be applied to encode the input.
+ std::vector<float> coefficients_;
+};
+
+} // namespace vraudio
+
+#endif // RESONANCE_AUDIO_GRAPH_AMBISONIC_MIXING_ENCODER_NODE_H_
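
For illustration, a sketch of how encoding coefficients are obtained before the |GainMixer| applies them (the order, direction, and spread below are arbitrary example values; |lookup_table| is assumed to be an AmbisonicLookupTable built for the maximum supported order):

    const int order = 1;
    std::vector<float> coeffs(GetNumPeriphonicComponents(order));
    const SphericalAngle direction = SphericalAngle::FromDegrees(36.0f, 18.0f);
    lookup_table.GetEncodingCoeffs(order, direction, /*spread_deg=*/0.0f,
                                   &coeffs);
    // |coeffs| now holds one gain per Ambisonic channel; AudioProcess() passes
    // them to GainMixer::AddInputChannel() for each mono source.
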
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/ambisonic_mixing_encoder_node_test.cc b/src/3rdparty/resonance-audio/resonance_audio/graph/ambisonic_mixing_encoder_node_test.cc
new file mode 100644
index 000000000..1977ab646
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/ambisonic_mixing_encoder_node_test.cc
@@ -0,0 +1,150 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "graph/ambisonic_mixing_encoder_node.h"
+
+#include <vector>
+
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "ambisonics/ambisonic_lookup_table.h"
+#include "ambisonics/utils.h"
+#include "api/resonance_audio_api.h"
+#include "base/constants_and_types.h"
+#include "base/object_transform.h"
+#include "config/source_config.h"
+#include "graph/buffered_source_node.h"
+#include "node/sink_node.h"
+#include "node/source_node.h"
+#include "utils/test_util.h"
+
+namespace vraudio {
+
+namespace {
+
+// Number of frames per input buffer.
+const size_t kFramesPerBuffer = 16;
+
+// Simulated system sample rate.
+const int kSampleRate = 48000;
+
+} // namespace
+
+// Provides unit tests for |AmbisonicMixingEncoderNode|.
+class AmbisonicMixingEncoderNodeTest
+ : public ::testing::TestWithParam<SourceGraphConfig> {
+ protected:
+ AmbisonicMixingEncoderNodeTest()
+ : system_settings_(kNumStereoChannels, kFramesPerBuffer, kSampleRate),
+ lookup_table_(kMaxSupportedAmbisonicOrder) {}
+
+ void SetUp() override {
+ const auto source_config = GetParam();
+ ambisonic_order_ = source_config.ambisonic_order;
+ ambisonic_mixing_encoder_node_ =
+ std::make_shared<AmbisonicMixingEncoderNode>(
+ system_settings_, lookup_table_, ambisonic_order_);
+ }
+
+ const AudioBuffer* ProcessMultipleInputs(size_t num_sources,
+ const WorldPosition& position,
+ float spread_deg) {
+ // Create the node graph, adding input nodes to the Ambisonic Mixing Encoder
+ // Node.
+ buffered_source_nodes_.clear();
+ auto parameters_manager = system_settings_.GetSourceParametersManager();
+ for (size_t i = 0; i < num_sources; ++i) {
+ buffered_source_nodes_.emplace_back(std::make_shared<BufferedSourceNode>(
+ static_cast<SourceId>(i) /*source id*/, kNumMonoChannels,
+ kFramesPerBuffer));
+ parameters_manager->Register(static_cast<SourceId>(i));
+ }
+ const AudioBuffer* output_buffer = nullptr;
+
+ for (auto& input_node : buffered_source_nodes_) {
+ ambisonic_mixing_encoder_node_->Connect(input_node);
+ }
+ auto output_node = std::make_shared<SinkNode>();
+ output_node->Connect(ambisonic_mixing_encoder_node_);
+
+ // Input data containing unit pulses.
+ const std::vector<float> kInputData(kFramesPerBuffer, 1.0f);
+
+ for (size_t index = 0; index < buffered_source_nodes_.size(); ++index) {
+ AudioBuffer* input_buffer =
+ buffered_source_nodes_[index]
+ ->GetMutableAudioBufferAndSetNewBufferFlag();
+ (*input_buffer)[0] = kInputData;
+ auto source_parameters = parameters_manager->GetMutableParameters(
+ static_cast<SourceId>(index));
+ source_parameters->object_transform.position = position;
+ source_parameters->spread_deg = spread_deg;
+ }
+
+ const std::vector<const AudioBuffer*>& buffer_vector =
+ output_node->ReadInputs();
+ if (!buffer_vector.empty()) {
+ DCHECK_EQ(buffer_vector.size(), 1U);
+ output_buffer = buffer_vector.front();
+ }
+
+ return output_buffer;
+ }
+
+ SystemSettings system_settings_;
+ int ambisonic_order_;
+ AmbisonicLookupTable lookup_table_;
+ std::shared_ptr<AmbisonicMixingEncoderNode> ambisonic_mixing_encoder_node_;
+ std::vector<std::shared_ptr<BufferedSourceNode>> buffered_source_nodes_;
+};
+
+// Tests that a number of sound objects encoded in the same direction are
+// correctly combined into an output buffer.
+TEST_P(AmbisonicMixingEncoderNodeTest, TestEncodeAndMix) {
+ // Number of sources to encode and mix.
+ const size_t kNumSources = 4;
+ // Minimum angular source spread of 0 ensures that no gain correction
+ // coefficients are to be applied to the Ambisonic encoding coefficients.
+ const float kSpreadDeg = 0.0f;
+ // Arbitrary world position of sound sources corresponding to the 36 degrees
+ // azimuth and 18 degrees elevation.
+ const WorldPosition kPosition =
+ WorldPosition(-0.55901699f, 0.30901699f, -0.76942088f);
+ // Expected Ambisonic output for a single source at the above position (as
+ // generated using /matlab/ambisonics/ambix/ambencode.m Matlab function):
+ const std::vector<float> kExpectedSingleSourceOutput = {
+ 1.0f, 0.55901699f, 0.30901699f, 0.76942088f,
+ 0.74498856f, 0.29920441f, -0.35676274f, 0.41181955f,
+ 0.24206145f, 0.64679299f, 0.51477443f, -0.17888019f,
+ -0.38975424f, -0.24620746f, 0.16726035f, -0.21015578f};
+
+ const AudioBuffer* output_buffer =
+ ProcessMultipleInputs(kNumSources, kPosition, kSpreadDeg);
+
+ const size_t num_channels = GetNumPeriphonicComponents(ambisonic_order_);
+
+ for (size_t i = 0; i < num_channels; ++i) {
+ EXPECT_NEAR(
+ kExpectedSingleSourceOutput[i] * static_cast<float>(kNumSources),
+ (*output_buffer)[i][kFramesPerBuffer - 1], kEpsilonFloat);
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(TestParameters, AmbisonicMixingEncoderNodeTest,
+ testing::Values(BinauralLowQualityConfig(),
+ BinauralMediumQualityConfig(),
+ BinauralHighQualityConfig()));
+
+} // namespace vraudio
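
A quick sanity check of the expected values in the test above (my own arithmetic, not part of the patch; it assumes ACN channel ordering with SN3D normalization): the first-order coefficients for 36 degrees azimuth and 18 degrees elevation come out as

    const float kPi = 3.14159265f;
    const float az = 36.0f * kPi / 180.0f;
    const float el = 18.0f * kPi / 180.0f;
    const float w = 1.0f;                         // kExpectedSingleSourceOutput[0]
    const float y = std::sin(az) * std::cos(el);  // ~0.559017, entry [1]
    const float z = std::sin(el);                 // ~0.309017, entry [2]
    const float x = std::cos(az) * std::cos(el);  // ~0.769421, entry [3]

which matches the first four entries of kExpectedSingleSourceOutput (requires <cmath>).
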
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/binaural_surround_renderer_impl.cc b/src/3rdparty/resonance-audio/resonance_audio/graph/binaural_surround_renderer_impl.cc
new file mode 100644
index 000000000..79889fabd
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/binaural_surround_renderer_impl.cc
@@ -0,0 +1,495 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "graph/binaural_surround_renderer_impl.h"
+
+#include <algorithm>
+#include <functional>
+
+#include "base/misc_math.h"
+#include "base/simd_utils.h"
+#include "base/spherical_angle.h"
+#include "graph/resonance_audio_api_impl.h"
+#include "platforms/common/room_effects_utils.h"
+#include "platforms/common/room_properties.h"
+#include "utils/planar_interleaved_conversion.h"
+
+namespace vraudio {
+
+namespace {
+
+// Maximum number of audio buffers in buffer queue.
+const size_t kNumMaxBuffers = 64;
+
+// Output gain, to avoid clipping of individual virtual speaker channels.
+const float kGain = 0.5f;
+
+} // namespace
+
+BinauralSurroundRendererImpl::BinauralSurroundRendererImpl(
+ size_t frames_per_buffer, int sample_rate_hz)
+ :
+ resonance_audio_api_(nullptr),
+ frames_per_buffer_(frames_per_buffer),
+ sample_rate_hz_(sample_rate_hz),
+ surround_format_(kInvalid),
+ num_input_channels_(0),
+ output_buffer_(kNumStereoChannels, frames_per_buffer),
+ total_frames_buffered_(0),
+ num_zero_padded_frames_(0),
+ output_gain_(1.0f) {
+}
+
+bool BinauralSurroundRendererImpl::Init(SurroundFormat surround_format) {
+ surround_format_ = surround_format;
+ num_input_channels_ =
+ GetExpectedNumChannelsFromSurroundFormat(surround_format);
+
+ temp_planar_buffer_ptrs_.resize(num_input_channels_);
+
+ input_audio_buffer_queue_.reset(new ThreadsafeFifo<AudioBuffer>(
+ kNumMaxBuffers, AudioBuffer(num_input_channels_, frames_per_buffer_)));
+
+ buffer_partitioner_.reset(new BufferPartitioner(
+ num_input_channels_, frames_per_buffer_,
+ std::bind(&BinauralSurroundRendererImpl::BufferPartitionerCallback, this,
+ std::placeholders::_1)));
+
+ buffer_unpartitioner_.reset(new BufferUnpartitioner(
+ kNumStereoChannels, frames_per_buffer_,
+ std::bind(&BinauralSurroundRendererImpl::ProcessBuffer, this)));
+
+ resonance_audio_api_.reset(CreateResonanceAudioApi(
+ kNumStereoChannels, frames_per_buffer_, sample_rate_hz_));
+
+ if (surround_format == kSurroundMono || surround_format == kSurroundStereo ||
+ surround_format == kSurroundFiveDotOne ||
+ surround_format == kSurroundSevenDotOne) {
+ InitializeRoomReverb();
+ }
+ // Initialize rendering mode.
+ switch (surround_format) {
+ case kSurroundMono:
+ InitializeBinauralMono();
+ break;
+ case kSurroundStereo:
+ InitializeBinauralStereo();
+ break;
+ case kSurroundFiveDotOne:
+ InitializeBinauralSurround5dot1();
+ break;
+ case kSurroundSevenDotOne:
+ InitializeBinauralSurround7dot1();
+ break;
+ case kFirstOrderAmbisonics:
+ case kSecondOrderAmbisonics:
+ case kThirdOrderAmbisonics:
+ InitializeAmbisonics();
+ break;
+ case kFirstOrderAmbisonicsWithNonDiegeticStereo:
+ case kSecondOrderAmbisonicsWithNonDiegeticStereo:
+ case kThirdOrderAmbisonicsWithNonDiegeticStereo:
+ InitializeAmbisonicsWithNonDiegeticStereo();
+ break;
+ default:
+ LOG(FATAL) << "Undefined rendering mode";
+ return false;
+ break;
+ }
+ return true;
+}
+
+BinauralSurroundRendererImpl::BinauralSurroundRendererImpl()
+ :
+ resonance_audio_api_(nullptr),
+ frames_per_buffer_(0),
+ sample_rate_hz_(0),
+ total_frames_buffered_(0),
+ num_zero_padded_frames_(0) {
+}
+
+AudioBuffer* BinauralSurroundRendererImpl::BufferPartitionerCallback(
+ AudioBuffer* processed_buffer) {
+ if (processed_buffer != nullptr) {
+ input_audio_buffer_queue_->ReleaseInputObject(processed_buffer);
+ }
+ DCHECK(!input_audio_buffer_queue_->Full());
+ return input_audio_buffer_queue_->AcquireInputObject();
+}
+
+void BinauralSurroundRendererImpl::SetStereoSpeakerMode(bool enabled) {
+ resonance_audio_api_->SetStereoSpeakerMode(enabled);
+}
+
+size_t BinauralSurroundRendererImpl::GetExpectedNumChannelsFromSurroundFormat(
+ SurroundFormat surround_format) {
+ switch (surround_format) {
+ case kSurroundMono:
+ return kNumMonoChannels;
+ case kSurroundStereo:
+ return kNumStereoChannels;
+ case kSurroundFiveDotOne:
+ return kNumSurroundFiveDotOneChannels;
+ case kSurroundSevenDotOne:
+ return kNumSurroundSevenDotOneChannels;
+ case kFirstOrderAmbisonics:
+ return kNumFirstOrderAmbisonicChannels;
+ case kSecondOrderAmbisonics:
+ return kNumSecondOrderAmbisonicChannels;
+ case kThirdOrderAmbisonics:
+ return kNumThirdOrderAmbisonicChannels;
+ case kFirstOrderAmbisonicsWithNonDiegeticStereo:
+ return kNumFirstOrderAmbisonicChannels + kNumStereoChannels;
+ case kSecondOrderAmbisonicsWithNonDiegeticStereo:
+ return kNumSecondOrderAmbisonicChannels + kNumStereoChannels;
+ case kThirdOrderAmbisonicsWithNonDiegeticStereo:
+ return kNumThirdOrderAmbisonicChannels + kNumStereoChannels;
+ default:
+ LOG(FATAL) << "Undefined surround format mode";
+ return false;
+ break;
+ }
+ return 0;
+}
+
+void BinauralSurroundRendererImpl::InitializeBinauralMono() {
+ source_ids_.resize(kNumMonoChannels);
+ // Front (0 degrees):
+ source_ids_[0] = CreateSoundObject(0.0f);
+ output_gain_ = kGain;
+}
+
+void BinauralSurroundRendererImpl::InitializeBinauralStereo() {
+
+ source_ids_.resize(kNumStereoChannels);
+ // Front left (30 degrees):
+ source_ids_[0] = CreateSoundObject(30.0f);
+ // Front right (-30 degrees):
+ source_ids_[1] = CreateSoundObject(-30.0f);
+ output_gain_ = kGain;
+}
+
+void BinauralSurroundRendererImpl::InitializeBinauralSurround5dot1() {
+ source_ids_.resize(kNumSurroundFiveDotOneChannels);
+ // Left (30 degrees):
+ source_ids_[0] = CreateSoundObject(30.0f);
+ // Right (-30 degrees):
+ source_ids_[1] = CreateSoundObject(-30.0f);
+ // Center (0 degrees):
+ source_ids_[2] = CreateSoundObject(0.0f);
+ // Low frequency effects at front center:
+ source_ids_[3] = CreateSoundObject(0.0f);
+ // Left surround (110 degrees):
+ source_ids_[4] = CreateSoundObject(110.0f);
+ // Right surround (-110 degrees):
+ source_ids_[5] = CreateSoundObject(-110.0f);
+ output_gain_ = kGain;
+}
+
+void BinauralSurroundRendererImpl::InitializeBinauralSurround7dot1() {
+ source_ids_.resize(kNumSurroundSevenDotOneChannels);
+ // Left (30 degrees):
+ source_ids_[0] = CreateSoundObject(30.0f);
+ // Right (-30 degrees):
+ source_ids_[1] = CreateSoundObject(-30.0f);
+ // Center (0 degrees):
+ source_ids_[2] = CreateSoundObject(0.0f);
+ // Low frequency effects at front center:
+ source_ids_[3] = CreateSoundObject(0.0f);
+ // Left surround 1 (90 degrees):
+ source_ids_[4] = CreateSoundObject(90.0f);
+ // Right surround 1 (-90 degrees):
+ source_ids_[5] = CreateSoundObject(-90.0f);
+ // Left surround 2 (150 degrees):
+ source_ids_[6] = CreateSoundObject(150.0f);
+ // Right surround 2 (-150 degrees):
+ source_ids_[7] = CreateSoundObject(-150.0f);
+ output_gain_ = kGain;
+}
+
+void BinauralSurroundRendererImpl::InitializeAmbisonics() {
+ source_ids_.resize(1);
+ source_ids_[0] =
+ resonance_audio_api_->CreateAmbisonicSource(num_input_channels_);
+}
+
+void BinauralSurroundRendererImpl::InitializeAmbisonicsWithNonDiegeticStereo() {
+ source_ids_.resize(2);
+ CHECK_GT(num_input_channels_, kNumStereoChannels);
+ source_ids_[0] = resonance_audio_api_->CreateAmbisonicSource(
+ num_input_channels_ - kNumStereoChannels);
+ source_ids_[1] = resonance_audio_api_->CreateStereoSource(kNumStereoChannels);
+}
+
+SourceId BinauralSurroundRendererImpl::CreateSoundObject(float azimuth_deg) {
+ static const float kZeroElevation = 0.0f;
+ auto speaker_position =
+ vraudio::SphericalAngle::FromDegrees(azimuth_deg, kZeroElevation)
+ .GetWorldPositionOnUnitSphere();
+ const SourceId source_id = resonance_audio_api_->CreateSoundObjectSource(
+ RenderingMode::kBinauralHighQuality);
+ resonance_audio_api_->SetSourcePosition(
+ source_id, speaker_position[0], speaker_position[1], speaker_position[2]);
+ return source_id;
+}
+
+void BinauralSurroundRendererImpl::InitializeRoomReverb() {
+ // The following settings have been applied based on AESTD1001.1.01-10.
+ RoomProperties room_properties;
+ room_properties.dimensions[0] = 9.54f;
+ room_properties.dimensions[1] = 6.0f;
+ room_properties.dimensions[2] = 15.12f;
+ room_properties.reverb_brightness = 0.0f;
+ room_properties.reflection_scalar = 1.0f;
+ // Reduce reverb gain to compensate for virtual speakers gain.
+ room_properties.reverb_gain = output_gain_;
+ for (size_t i = 0; i < kNumRoomSurfaces; ++i) {
+ room_properties.material_names[i] = MaterialName::kUniform;
+ }
+ resonance_audio_api_->SetReflectionProperties(
+ ComputeReflectionProperties(room_properties));
+ resonance_audio_api_->SetReverbProperties(
+ ComputeReverbProperties(room_properties));
+ resonance_audio_api_->EnableRoomEffects(true);
+}
+
+size_t BinauralSurroundRendererImpl::GetNumAvailableFramesInInputBuffer()
+ const {
+ DCHECK_NE(surround_format_, kInvalid);
+ if (num_zero_padded_frames_ > 0) {
+ // Zero padded output buffers must be consumed prior to
+ // |AddInterleavedInput| calls.
+ return 0;
+ }
+ if (input_audio_buffer_queue_->Full()) {
+ return 0;
+ }
+ // Subtract two buffers from the available input slots to ensure the buffer
+ // partitioner can be flushed at any time while keeping an extra buffer
+ // available in the |buffer_partitioner_| callback for the next incoming data.
+ const size_t num_frames_available_in_input_slots =
+ (kNumMaxBuffers - input_audio_buffer_queue_->Size() - 2) *
+ frames_per_buffer_;
+ DCHECK_GT(frames_per_buffer_, buffer_partitioner_->GetNumBufferedFrames());
+ const size_t num_frames_available_in_buffer_partitioner =
+ frames_per_buffer_ - buffer_partitioner_->GetNumBufferedFrames();
+ return num_frames_available_in_input_slots +
+ num_frames_available_in_buffer_partitioner;
+}
+
+size_t BinauralSurroundRendererImpl::AddInterleavedInput(
+ const int16* input_buffer_ptr, size_t num_channels, size_t num_frames) {
+ return AddInputBufferTemplated<const int16*>(input_buffer_ptr, num_channels,
+ num_frames);
+}
+
+size_t BinauralSurroundRendererImpl::AddInterleavedInput(
+ const float* input_buffer_ptr, size_t num_channels, size_t num_frames) {
+ return AddInputBufferTemplated<const float*>(input_buffer_ptr, num_channels,
+ num_frames);
+}
+
+size_t BinauralSurroundRendererImpl::AddPlanarInput(
+ const int16* const* input_buffer_ptrs, size_t num_channels,
+ size_t num_frames) {
+ return AddInputBufferTemplated<const int16* const*>(input_buffer_ptrs,
+ num_channels, num_frames);
+}
+
+size_t BinauralSurroundRendererImpl::AddPlanarInput(
+ const float* const* input_buffer_ptrs, size_t num_channels,
+ size_t num_frames) {
+ return AddInputBufferTemplated<const float* const*>(input_buffer_ptrs,
+ num_channels, num_frames);
+}
+
+template <typename BufferType>
+size_t BinauralSurroundRendererImpl::AddInputBufferTemplated(
+ const BufferType input_buffer_ptr, size_t num_channels, size_t num_frames) {
+ DCHECK_NE(surround_format_, kInvalid);
+ if (num_channels != num_input_channels_) {
+ LOG(WARNING) << "Invalid number of input channels";
+ return 0;
+ }
+
+ if (num_zero_padded_frames_ > 0) {
+ LOG(WARNING) << "Zero padded output buffers must be consumed prior to "
+ "|AddInterleavedBuffer| calls";
+ return 0;
+ }
+ const size_t num_available_input_frames =
+ std::min(num_frames, GetNumAvailableFramesInInputBuffer());
+
+ buffer_partitioner_->AddBuffer(input_buffer_ptr, num_input_channels_,
+ num_available_input_frames);
+ total_frames_buffered_ += num_available_input_frames;
+ return num_available_input_frames;
+}
+
+size_t BinauralSurroundRendererImpl::GetAvailableFramesInStereoOutputBuffer()
+ const {
+ const size_t num_available_samples_in_buffers =
+ (input_audio_buffer_queue_->Size() * frames_per_buffer_) +
+ buffer_unpartitioner_->GetNumBufferedFrames();
+ return std::min(total_frames_buffered_, num_available_samples_in_buffers);
+}
+
+size_t BinauralSurroundRendererImpl::GetInterleavedStereoOutput(
+ int16* output_buffer_ptr, size_t num_frames) {
+ return GetStereoOutputBufferTemplated<int16*>(output_buffer_ptr, num_frames);
+}
+
+size_t BinauralSurroundRendererImpl::GetInterleavedStereoOutput(
+ float* output_buffer_ptr, size_t num_frames) {
+ return GetStereoOutputBufferTemplated<float*>(output_buffer_ptr, num_frames);
+}
+
+size_t BinauralSurroundRendererImpl::GetPlanarStereoOutput(
+ int16** output_buffer_ptrs, size_t num_frames) {
+ return GetStereoOutputBufferTemplated<int16**>(output_buffer_ptrs,
+ num_frames);
+}
+
+size_t BinauralSurroundRendererImpl::GetPlanarStereoOutput(
+ float** output_buffer_ptrs, size_t num_frames) {
+ return GetStereoOutputBufferTemplated<float**>(output_buffer_ptrs,
+ num_frames);
+}
+
+template <typename BufferType>
+size_t BinauralSurroundRendererImpl::GetStereoOutputBufferTemplated(
+ BufferType output_buffer_ptr, size_t num_frames) {
+ DCHECK_NE(surround_format_, kInvalid);
+ const size_t num_frames_available = GetAvailableFramesInStereoOutputBuffer();
+ size_t num_frames_to_be_processed =
+ std::min(num_frames_available, num_frames);
+ if (num_frames_to_be_processed > total_frames_buffered_) {
+ // Avoid outputting zero padded input frames from |TriggerProcessing|
+ // calls.
+ num_frames_to_be_processed = total_frames_buffered_;
+ }
+
+ const size_t num_frames_written = buffer_unpartitioner_->GetBuffer(
+ output_buffer_ptr, kNumStereoChannels, num_frames_to_be_processed);
+
+ DCHECK_GE(total_frames_buffered_, num_frames_written);
+ total_frames_buffered_ -= num_frames_written;
+
+ if (total_frames_buffered_ == 0) {
+ // Clear zero padded frames from |TriggerProcessing| calls.
+ buffer_unpartitioner_->Clear();
+ num_zero_padded_frames_ = 0;
+ }
+
+ return num_frames_written;
+}
+
+void BinauralSurroundRendererImpl::Clear() {
+ input_audio_buffer_queue_->Clear();
+ buffer_partitioner_->Clear();
+ buffer_unpartitioner_->Clear();
+ total_frames_buffered_ = 0;
+ num_zero_padded_frames_ = 0;
+}
+
+bool BinauralSurroundRendererImpl::TriggerProcessing() {
+ if (num_zero_padded_frames_ > 0) {
+ LOG(WARNING) << "Zero padded output buffers must be consumed prior to "
+ "|TriggerProcessing| calls";
+ return false;
+ }
+ num_zero_padded_frames_ = buffer_partitioner_->Flush();
+ return num_zero_padded_frames_ > 0;
+}
+
+void BinauralSurroundRendererImpl::SetHeadRotation(float w, float x, float y,
+ float z) {
+ resonance_audio_api_->SetHeadRotation(x, y, z, w);
+}
+
+AudioBuffer* BinauralSurroundRendererImpl::ProcessBuffer() {
+ if (input_audio_buffer_queue_->Size() == 0) {
+ LOG(WARNING) << "Buffer underflow detected";
+ return nullptr;
+ }
+
+ const AudioBuffer* input = input_audio_buffer_queue_->AcquireOutputObject();
+ DCHECK_EQ(input->num_frames(), frames_per_buffer_);
+ DCHECK_EQ(num_input_channels_, input->num_channels());
+ GetRawChannelDataPointersFromAudioBuffer(*input, &temp_planar_buffer_ptrs_);
+ // Initialize surround rendering.
+ const float* planar_ptr;
+
+ switch (surround_format_) {
+ case kSurroundMono:
+ case kSurroundStereo:
+ case kSurroundFiveDotOne:
+ case kSurroundSevenDotOne:
+ DCHECK_EQ(input->num_channels(), source_ids_.size());
+ for (size_t source_itr = 0; source_itr < source_ids_.size();
+ ++source_itr) {
+ planar_ptr = (*input)[source_itr].begin();
+ resonance_audio_api_->SetPlanarBuffer(source_ids_[source_itr],
+ &planar_ptr, kNumMonoChannels,
+ input->num_frames());
+ }
+ break;
+ case kFirstOrderAmbisonics:
+ case kSecondOrderAmbisonics:
+ case kThirdOrderAmbisonics:
+ DCHECK_EQ(source_ids_.size(), 1U);
+ resonance_audio_api_->SetPlanarBuffer(
+ source_ids_[0], temp_planar_buffer_ptrs_.data(),
+ input->num_channels(), input->num_frames());
+ break;
+ case kFirstOrderAmbisonicsWithNonDiegeticStereo:
+ case kSecondOrderAmbisonicsWithNonDiegeticStereo:
+ case kThirdOrderAmbisonicsWithNonDiegeticStereo:
+ DCHECK_EQ(source_ids_.size(), 2U);
+ DCHECK_GT(input->num_channels(), kNumStereoChannels);
+ static_cast<ResonanceAudioApiImpl*>(resonance_audio_api_.get())
+ ->SetPlanarBuffer(source_ids_[0], temp_planar_buffer_ptrs_.data(),
+ input->num_channels() - kNumStereoChannels,
+ input->num_frames());
+ static_cast<ResonanceAudioApiImpl*>(resonance_audio_api_.get())
+ ->SetPlanarBuffer(source_ids_[1],
+ temp_planar_buffer_ptrs_.data() +
+ (input->num_channels() - kNumStereoChannels),
+ kNumStereoChannels, input->num_frames());
+ break;
+ default:
+ LOG(FATAL) << "Undefined surround format";
+ break;
+ }
+
+ // Create a copy of the processed |AudioBuffer| to pass it to output buffer
+ // queue.
+ auto* const vraudio_api_impl =
+ static_cast<ResonanceAudioApiImpl*>(resonance_audio_api_.get());
+ vraudio_api_impl->ProcessNextBuffer();
+ output_buffer_ = *vraudio_api_impl->GetStereoOutputBuffer();
+
+ if (output_gain_ != 1.0f) {
+ for (AudioBuffer::Channel& channel : output_buffer_) {
+ ScalarMultiply(output_buffer_.num_frames(), output_gain_, channel.begin(),
+ channel.begin());
+ }
+ }
+ input_audio_buffer_queue_->ReleaseOutputObject(input);
+ return &output_buffer_;
+}
+
+} // namespace vraudio
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/binaural_surround_renderer_impl.h b/src/3rdparty/resonance-audio/resonance_audio/graph/binaural_surround_renderer_impl.h
new file mode 100644
index 000000000..c4916d664
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/binaural_surround_renderer_impl.h
@@ -0,0 +1,190 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#ifndef RESONANCE_AUDIO_GRAPH_BINAURAL_SURROUND_RENDERER_IMPL_H_
+#define RESONANCE_AUDIO_GRAPH_BINAURAL_SURROUND_RENDERER_IMPL_H_
+
+#include <memory>
+#include <string>
+
+#include "api/binaural_surround_renderer.h"
+#include "api/resonance_audio_api.h"
+#include "base/constants_and_types.h"
+#include "utils/buffer_partitioner.h"
+#include "utils/buffer_unpartitioner.h"
+#include "utils/threadsafe_fifo.h"
+
+namespace vraudio {
+
+// Renders virtual surround sound as well as ambisonic soundfields to binaural
+// stereo.
+class BinauralSurroundRendererImpl : public BinauralSurroundRenderer {
+ public:
+ // Constructor.
+ //
+ // @param frames_per_buffer Number of frames in output buffer.
+ // @param sample_rate_hz Sample rate of audio buffers.
+ BinauralSurroundRendererImpl(size_t frames_per_buffer, int sample_rate_hz);
+
+ ~BinauralSurroundRendererImpl() override {}
+
+ // Initializes surround sound decoding.
+ //
+ // @param surround_format Surround sound input format.
+ // @return True on success.
+ bool Init(SurroundFormat surround_format);
+
+ // Implements |AudioRenderer| interface.
+ void SetStereoSpeakerMode(bool enabled) override;
+ size_t GetNumAvailableFramesInInputBuffer() const override;
+ size_t AddInterleavedInput(const int16* input_buffer_ptr, size_t num_channels,
+ size_t num_frames) override;
+ size_t AddInterleavedInput(const float* input_buffer_ptr, size_t num_channels,
+ size_t num_frames) override;
+ size_t AddPlanarInput(const int16* const* input_buffer_ptrs,
+ size_t num_channels, size_t num_frames) override;
+ size_t AddPlanarInput(const float* const* input_buffer_ptrs,
+ size_t num_channels, size_t num_frames) override;
+ size_t GetAvailableFramesInStereoOutputBuffer() const override;
+ size_t GetInterleavedStereoOutput(int16* output_buffer_ptr,
+ size_t num_frames) override;
+ size_t GetInterleavedStereoOutput(float* output_buffer_ptr,
+ size_t num_frames) override;
+ size_t GetPlanarStereoOutput(int16** output_buffer_ptrs,
+ size_t num_frames) override;
+ size_t GetPlanarStereoOutput(float** output_buffer_ptrs,
+ size_t num_frames) override;
+ bool TriggerProcessing() override;
+ void Clear() override;
+ void SetHeadRotation(float w, float x, float y, float z) override;
+
+ protected:
+ // Protected default constructor for mock tests.
+ BinauralSurroundRendererImpl();
+
+ private:
+ // Callback triggered by |buffer_partitioner_| whenever a new |AudioBuffer|
+ // has been generated.
+ //
+ // @param processed_buffer Pointer to processed buffer.
+ // @return Pointer to next |AudioBuffer| to be filled up.
+ AudioBuffer* BufferPartitionerCallback(AudioBuffer* processed_buffer);
+
+ // Helper method to implement |AddInterleavedInput| independently from the
+ // sample type.
+ //
+ // @tparam BufferType Input buffer type.
+ // @param input_buffer_ptr Pointer to interleaved input data.
+ // @param num_channels Number of channels in input buffer.
+ // @param num_frames Number of frames in input buffer.
+ // @return The number of consumed frames.
+ template <typename BufferType>
+ size_t AddInputBufferTemplated(const BufferType input_buffer_ptr,
+ size_t num_channels, size_t num_frames);
+
+ // Helper method to implement |GetInterleavedStereoOutput| independently from the
+ // sample type.
+ //
+ // @tparam BufferType Output buffer type.
+ // @param output_buffer_ptr Pointer to allocated interleaved output buffer.
+ // @param num_frames Size of output buffer in frames.
+ // @return The number of consumed frames.
+ template <typename BufferType>
+ size_t GetStereoOutputBufferTemplated(BufferType output_buffer_ptr,
+ size_t num_frames);
+
+ // Helper method to obtain the expected number of audio channels for a given
+ // surround format.
+ //
+ // @param surround_format Surround format query.
+ // @return Number of audio channels.
+ static size_t GetExpectedNumChannelsFromSurroundFormat(
+ SurroundFormat surround_format);
+
+ // Process method executed by |buffer_unpartitioner_|.
+ AudioBuffer* ProcessBuffer();
+
+ // Initializes binaural mono rendering.
+ void InitializeBinauralMono();
+
+ // Initializes binaural stereo rendering.
+ void InitializeBinauralStereo();
+
+ // Initializes binaural 5.1 rendering.
+ void InitializeBinauralSurround5dot1();
+
+ // Initializes binaural 7.1 rendering.
+ void InitializeBinauralSurround7dot1();
+
+ // Initializes binaural ambisonic rendering.
+ void InitializeAmbisonics();
+
+ // Initializes binaural ambisonic rendering with non-diegetic stereo.
+ void InitializeAmbisonicsWithNonDiegeticStereo();
+
+ // Creates a sound object at a given angle within the horizontal listener plane.
+ SourceId CreateSoundObject(float azimuth_deg);
+
+ // Initializes room reverb for virtual surround sound rendering.
+ void InitializeRoomReverb();
+
+ // ResonanceAudioApi instance.
+ std::unique_ptr<ResonanceAudioApi> resonance_audio_api_;
+
+ // Frames per buffer.
+ const size_t frames_per_buffer_;
+
+ // System sample rate.
+ const int sample_rate_hz_;
+
+ // Selected surround sound format.
+ SurroundFormat surround_format_;
+
+ // Number of input channels.
+ size_t num_input_channels_;
+
+ // Partitions input buffers into |AudioBuffer|s.
+ std::unique_ptr<BufferPartitioner> buffer_partitioner_;
+
+ // Buffer queue containing partitioned input |AudioBuffer|s.
+ std::unique_ptr<ThreadsafeFifo<AudioBuffer>> input_audio_buffer_queue_;
+
+ // Binaural stereo output buffer.
+ AudioBuffer output_buffer_;
+
+ // Unpartitions processed |AudioBuffer|s into interleaved output buffers.
+ std::unique_ptr<BufferUnpartitioner> buffer_unpartitioner_;
+
+ // Vector containing the source ids of all rendered sound sources.
+ std::vector<SourceId> source_ids_;
+
+ // Total number of frames currently buffered.
+ size_t total_frames_buffered_;
+
+ // Total number of zero padded frames from |TriggerProcessing| calls.
+ size_t num_zero_padded_frames_;
+
+ // Temporary buffer to store pointers to planar ambisonic and stereo channels.
+ std::vector<const float*> temp_planar_buffer_ptrs_;
+
+ // Global output gain adjustment, to avoid clipping of individual channels
+ // in virtual speaker modes.
+ float output_gain_;
+};
+
+} // namespace vraudio
+
+#endif // RESONANCE_AUDIO_GRAPH_BINAURAL_SURROUND_RENDERER_IMPL_H_
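
A minimal usage sketch of the renderer API declared above (buffer size, format, and rotation values are arbitrary; error handling omitted):

    const size_t kFrames = 512;
    BinauralSurroundRendererImpl renderer(kFrames, /*sample_rate_hz=*/48000);
    renderer.Init(BinauralSurroundRenderer::SurroundFormat::kSurroundFiveDotOne);
    std::vector<float> input(kNumSurroundFiveDotOneChannels * kFrames, 0.0f);
    std::vector<float> output(kNumStereoChannels * kFrames, 0.0f);
    renderer.SetHeadRotation(/*w=*/1.0f, /*x=*/0.0f, /*y=*/0.0f, /*z=*/0.0f);
    renderer.AddInterleavedInput(input.data(), kNumSurroundFiveDotOneChannels,
                                 kFrames);
    // Returns the number of frames actually written, which can be smaller than
    // requested until enough input has been buffered.
    const size_t written =
        renderer.GetInterleavedStereoOutput(output.data(), kFrames);
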
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/binaural_surround_renderer_impl_test.cc b/src/3rdparty/resonance-audio/resonance_audio/graph/binaural_surround_renderer_impl_test.cc
new file mode 100644
index 000000000..98652ca3e
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/binaural_surround_renderer_impl_test.cc
@@ -0,0 +1,202 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "graph/binaural_surround_renderer_impl.h"
+
+#include <algorithm>
+#include <vector>
+
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "base/constants_and_types.h"
+
+namespace vraudio {
+
+namespace {
+
+class BinauralSurroundRendererTest
+ : public ::testing::TestWithParam<
+ BinauralSurroundRenderer::SurroundFormat> {
+ protected:
+ BinauralSurroundRendererTest() {}
+
+ // Virtual methods from ::testing::Test
+ ~BinauralSurroundRendererTest() override {}
+ void SetUp() override {}
+ void TearDown() override {}
+
+ void InitBinauralSurroundRenderer(size_t frames_per_buffer,
+ int sample_rate_hz) {
+ binaural_surround_renderer_api_.reset(
+ new BinauralSurroundRendererImpl(frames_per_buffer, sample_rate_hz));
+ }
+
+ // Processes an interleaved input vector and returns interleaved binaural
+ // stereo output.
+ std::vector<float> ProcessInterleaved(
+ const std::vector<float>& interleaved_input, size_t num_channels,
+ size_t frames_per_buffer) {
+ EXPECT_EQ(interleaved_input.size() % (num_channels * frames_per_buffer),
+ 0U);
+ std::vector<float> interleaved_output;
+ const size_t num_buffers =
+ interleaved_input.size() / (num_channels * frames_per_buffer);
+ for (size_t b = 0; b < num_buffers; ++b) {
+ const float* interleaved_input_ptr =
+ interleaved_input.data() + b * num_channels * frames_per_buffer;
+ binaural_surround_renderer_api_->AddInterleavedInput(
+ interleaved_input_ptr, num_channels, frames_per_buffer);
+
+ interleaved_output.resize((b + 1) * kNumStereoChannels *
+ frames_per_buffer);
+ float* interleaved_output_ptr =
+ interleaved_output.data() +
+ b * kNumStereoChannels * frames_per_buffer;
+
+ EXPECT_EQ(binaural_surround_renderer_api_->GetInterleavedStereoOutput(
+ interleaved_output_ptr, frames_per_buffer),
+ frames_per_buffer);
+ }
+ return interleaved_output;
+ }
+
+ // Calculates the maximum absolute difference between adjacent samples in an
+ // interleaved audio buffer.
+ float GetMaximumSampleDiff(const std::vector<float>& interleaved_input,
+ size_t num_channels) {
+ if (interleaved_input.size() <= num_channels) {
+ return 0.0f;
+ }
+
+ float max_sample_diff = 0.0f;
+ std::vector<float> prev_samples(num_channels);
+ for (size_t i = 0; i < num_channels; ++i) {
+ prev_samples[i] = interleaved_input[i];
+ }
+ for (size_t i = num_channels; i < interleaved_input.size(); ++i) {
+ const size_t channel = i % num_channels;
+ max_sample_diff =
+ std::max(max_sample_diff,
+ std::abs(interleaved_input[i] - prev_samples[channel]));
+ prev_samples[channel] = interleaved_input[i];
+ }
+ return max_sample_diff;
+ }
+
+ // Helper to return the number of input channels for a given surround format.
+ size_t GetNumInputChannelsForSurroundFormat(
+ BinauralSurroundRenderer::SurroundFormat format) {
+ switch (format) {
+ case BinauralSurroundRenderer::SurroundFormat::kSurroundMono:
+ return kNumMonoChannels;
+ break;
+ case BinauralSurroundRenderer::SurroundFormat::kSurroundStereo:
+ return kNumStereoChannels;
+ break;
+ case BinauralSurroundRenderer::SurroundFormat::kSurroundFiveDotOne:
+ return kNumSurroundFiveDotOneChannels;
+ break;
+ case BinauralSurroundRenderer::SurroundFormat::kSurroundSevenDotOne:
+ return kNumSurroundSevenDotOneChannels;
+ break;
+ case BinauralSurroundRenderer::SurroundFormat::kFirstOrderAmbisonics:
+ return kNumFirstOrderAmbisonicChannels;
+ break;
+ case BinauralSurroundRenderer::SurroundFormat::
+ kFirstOrderAmbisonicsWithNonDiegeticStereo:
+ return kNumFirstOrderAmbisonicWithNonDiegeticStereoChannels;
+ break;
+ case BinauralSurroundRenderer::SurroundFormat::kSecondOrderAmbisonics:
+ return kNumSecondOrderAmbisonicChannels;
+ break;
+ case BinauralSurroundRenderer::SurroundFormat::
+ kSecondOrderAmbisonicsWithNonDiegeticStereo:
+ return kNumSecondOrderAmbisonicWithNonDiegeticStereoChannels;
+ break;
+ case BinauralSurroundRenderer::SurroundFormat::kThirdOrderAmbisonics:
+ return kNumThirdOrderAmbisonicChannels;
+ break;
+ case BinauralSurroundRenderer::SurroundFormat::
+ kThirdOrderAmbisonicsWithNonDiegeticStereo:
+ return kNumThirdOrderAmbisonicWithNonDiegeticStereoChannels;
+ break;
+ default:
+ break;
+ }
+ LOG(FATAL) << "Unexpected format";
+ return 0;
+ }
+
+ // VR Audio API instance to test.
+ std::unique_ptr<BinauralSurroundRendererImpl> binaural_surround_renderer_api_;
+};
+
+// Processes an input signal with constant DC offset and scans the output for
+// drop outs and noise.
+TEST_P(BinauralSurroundRendererTest, DropOutGlitchTesting) {
+
+ const std::vector<int> kTestSampleRates = {44100, 48000};
+ const std::vector<int> kTestBufferSizes = {256, 413, 512};
+ const size_t kNumTestBuffers = 16;
+ const size_t kNumNumChannels =
+ GetNumInputChannelsForSurroundFormat(GetParam());
+
+ for (int sample_rate : kTestSampleRates) {
+ for (int buffer_size : kTestBufferSizes) {
+ InitBinauralSurroundRenderer(buffer_size, sample_rate);
+ binaural_surround_renderer_api_->Init(GetParam());
+
+ // Create DC input signal with magnitude 0.5f.
+ const std::vector<float> interleaved_dc_signal(
+ buffer_size * kNumNumChannels * kNumTestBuffers, 0.5f);
+
+ std::vector<float> interleaved_output = ProcessInterleaved(
+ interleaved_dc_signal, kNumNumChannels, buffer_size);
+
+ // Remove first half of samples from output vector to remove initial
+ // filter ringing effects and initial gain ramps.
+ interleaved_output.erase(
+ interleaved_output.begin(),
+ interleaved_output.begin() + interleaved_output.size() / 2);
+
+ const float kMaxExpectedMagnitudeDiff = 0.07f;
+ const float maximum_adjacent_frames_magnitude_diff =
+ GetMaximumSampleDiff(interleaved_output, kNumStereoChannels);
+ EXPECT_LT(maximum_adjacent_frames_magnitude_diff,
+ kMaxExpectedMagnitudeDiff);
+ }
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(
+ SurroundFormatInstances, BinauralSurroundRendererTest,
+ ::testing::Values(
+ BinauralSurroundRenderer::SurroundFormat::kSurroundMono,
+ BinauralSurroundRenderer::SurroundFormat::kSurroundStereo,
+ BinauralSurroundRenderer::SurroundFormat::kSurroundFiveDotOne,
+ BinauralSurroundRenderer::SurroundFormat::kSurroundSevenDotOne,
+ BinauralSurroundRenderer::SurroundFormat::kFirstOrderAmbisonics,
+ BinauralSurroundRenderer::SurroundFormat::
+ kFirstOrderAmbisonicsWithNonDiegeticStereo,
+ BinauralSurroundRenderer::SurroundFormat::kSecondOrderAmbisonics,
+ BinauralSurroundRenderer::SurroundFormat::
+ kSecondOrderAmbisonicsWithNonDiegeticStereo,
+ BinauralSurroundRenderer::SurroundFormat::kThirdOrderAmbisonics,
+ BinauralSurroundRenderer::SurroundFormat::
+ kThirdOrderAmbisonicsWithNonDiegeticStereo));
+
+} // namespace
+
+} // namespace vraudio
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/buffered_source_node.cc b/src/3rdparty/resonance-audio/resonance_audio/graph/buffered_source_node.cc
new file mode 100644
index 000000000..ab3e49d73
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/buffered_source_node.cc
@@ -0,0 +1,46 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "graph/buffered_source_node.h"
+
+#include "base/constants_and_types.h"
+#include "base/logging.h"
+
+namespace vraudio {
+
+BufferedSourceNode::BufferedSourceNode(SourceId source_id, size_t num_channels,
+ size_t frames_per_buffer)
+ : source_id_(source_id),
+ input_audio_buffer_(num_channels, frames_per_buffer),
+ new_buffer_flag_(false) {
+ input_audio_buffer_.Clear();
+}
+
+AudioBuffer* BufferedSourceNode::GetMutableAudioBufferAndSetNewBufferFlag() {
+ new_buffer_flag_ = true;
+ return &input_audio_buffer_;
+}
+
+const AudioBuffer* BufferedSourceNode::AudioProcess() {
+ if (!new_buffer_flag_) {
+ return nullptr;
+ }
+ new_buffer_flag_ = false;
+ input_audio_buffer_.set_source_id(source_id_);
+ return &input_audio_buffer_;
+}
+
+} // namespace vraudio
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/buffered_source_node.h b/src/3rdparty/resonance-audio/resonance_audio/graph/buffered_source_node.h
new file mode 100644
index 000000000..3d5ce0b4a
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/buffered_source_node.h
@@ -0,0 +1,62 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#ifndef RESONANCE_AUDIO_GRAPH_BUFFERED_SOURCE_NODE_H_
+#define RESONANCE_AUDIO_GRAPH_BUFFERED_SOURCE_NODE_H_
+
+#include "base/audio_buffer.h"
+#include "base/constants_and_types.h"
+#include "node/source_node.h"
+
+namespace vraudio {
+
+// Node that sets the |AudioBuffer| of a source. This class is *not*
+// thread-safe and calls to this class must be synchronized with the graph
+// processing.
+class BufferedSourceNode : public SourceNode {
+ public:
+ // Constructor.
+ //
+ // @param source_id Source id.
+  // @param num_channels Number of channels in output buffers.
+  // @param frames_per_buffer Number of frames per output buffer.
+ BufferedSourceNode(SourceId source_id, size_t num_channels,
+ size_t frames_per_buffer);
+
+ // Returns a mutable pointer to the internal |AudioBuffer| and sets a flag to
+ // process the buffer in the next graph processing iteration. Calls to this
+ // method must be synchronized with the audio graph processing.
+ //
+ // @return Mutable audio buffer pointer.
+ AudioBuffer* GetMutableAudioBufferAndSetNewBufferFlag();
+
+ protected:
+ // Implements SourceNode.
+ const AudioBuffer* AudioProcess() override;
+
+ // Source id.
+ const SourceId source_id_;
+
+ // Input audio buffer.
+ AudioBuffer input_audio_buffer_;
+
+  // Flag indicating if a new audio buffer has been set via
+ // |GetMutableAudioBufferAndSetNewBufferFlag|.
+ bool new_buffer_flag_;
+};
+
+} // namespace vraudio
+
+#endif // RESONANCE_AUDIO_GRAPH_BUFFERED_SOURCE_NODE_H_
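
For reference, a minimal sketch (not part of the patch) of the call pattern this class expects: write one block of samples through the mutable buffer, then let the next graph iteration consume it. It assumes the |AudioBuffer| channel assignment from std::vector used by the tests later in this patch.

#include <vector>

#include "graph/buffered_source_node.h"

void FeedOneBlock(vraudio::BufferedSourceNode* source,
                  const std::vector<float>& mono_samples) {
  // Must be called in sync with graph processing (see the class comment).
  vraudio::AudioBuffer* input =
      source->GetMutableAudioBufferAndSetNewBufferFlag();
  // Copy samples into the mono channel; |mono_samples| is assumed to match
  // the frames_per_buffer the node was constructed with.
  (*input)[0] = mono_samples;
  // The next processing iteration returns this buffer exactly once;
  // AudioProcess() yields nullptr again until a new buffer is set.
}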
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/foa_rotator_node.cc b/src/3rdparty/resonance-audio/resonance_audio/graph/foa_rotator_node.cc
new file mode 100644
index 000000000..71c81d017
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/foa_rotator_node.cc
@@ -0,0 +1,67 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "graph/foa_rotator_node.h"
+
+#include "base/logging.h"
+
+
+namespace vraudio {
+
+FoaRotatorNode::FoaRotatorNode(SourceId source_id,
+ const SystemSettings& system_settings)
+ : system_settings_(system_settings),
+ output_buffer_(kNumFirstOrderAmbisonicChannels,
+ system_settings.GetFramesPerBuffer()) {
+ output_buffer_.Clear();
+ output_buffer_.set_source_id(source_id);
+}
+
+const AudioBuffer* FoaRotatorNode::AudioProcess(const NodeInput& input) {
+
+
+ // Get the soundfield input buffer.
+ const AudioBuffer* input_buffer = input.GetSingleInput();
+ DCHECK(input_buffer);
+ DCHECK_GT(input_buffer->num_frames(), 0U);
+ DCHECK_EQ(input_buffer->num_channels(), 4U);
+ DCHECK_EQ(input_buffer->source_id(), output_buffer_.source_id());
+
+ // Rotate soundfield buffer by the inverse head orientation.
+ const auto source_parameters =
+ system_settings_.GetSourceParameters(input_buffer->source_id());
+ if (source_parameters == nullptr) {
+ LOG(WARNING) << "Could not find source parameters";
+ return nullptr;
+ }
+
+ const WorldRotation& source_rotation =
+ source_parameters->object_transform.rotation;
+ const WorldRotation inverse_head_rotation =
+ system_settings_.GetHeadRotation().conjugate();
+ const WorldRotation rotation = inverse_head_rotation * source_rotation;
+ const bool rotation_applied =
+ foa_rotator_.Process(rotation, *input_buffer, &output_buffer_);
+
+ if (!rotation_applied) {
+ return input_buffer;
+ }
+
+  // Return the rotated soundfield buffer.
+ return &output_buffer_;
+}
+
+} // namespace vraudio
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/foa_rotator_node.h b/src/3rdparty/resonance-audio/resonance_audio/graph/foa_rotator_node.h
new file mode 100644
index 000000000..802478a0a
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/foa_rotator_node.h
@@ -0,0 +1,54 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#ifndef RESONANCE_AUDIO_GRAPH_FOA_ROTATOR_NODE_H_
+#define RESONANCE_AUDIO_GRAPH_FOA_ROTATOR_NODE_H_
+
+#include <memory>
+
+#include "ambisonics/foa_rotator.h"
+#include "base/audio_buffer.h"
+#include "base/constants_and_types.h"
+#include "graph/system_settings.h"
+#include "node/processing_node.h"
+
+namespace vraudio {
+
+// Node that accepts a single first order PeriphonicSoundfieldBuffer as input
+// and outputs a rotated PeriphonicSoundfieldBuffer of the corresponding
+// soundfield input using head rotation information from the system settings.
+class FoaRotatorNode : public ProcessingNode {
+ public:
+ FoaRotatorNode(SourceId source_id, const SystemSettings& system_settings);
+
+ protected:
+  // Implements ProcessingNode. Returns a null pointer if the source
+  // parameters for the input buffer cannot be found.
+ const AudioBuffer* AudioProcess(const NodeInput& input) override;
+
+ private:
+ const SystemSettings& system_settings_;
+
+ // Soundfield rotator used to rotate first order soundfields.
+ FoaRotator foa_rotator_;
+
+ // Output buffer.
+ AudioBuffer output_buffer_;
+};
+
+} // namespace vraudio
+
+#endif // RESONANCE_AUDIO_GRAPH_FOA_ROTATOR_NODE_H_
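
A small sketch (not part of the patch) of the rotation composition performed in |FoaRotatorNode::AudioProcess|, where the soundfield is rotated by the inverse head rotation combined with the source rotation. |WorldRotation| is assumed here to behave like an Eigen quaternion, which is how the conjugate() and operator* calls above read.

#include <Eigen/Geometry>

Eigen::Quaternionf SoundfieldRotation(
    const Eigen::Quaternionf& head_rotation,
    const Eigen::Quaternionf& source_rotation) {
  // For unit quaternions the conjugate equals the inverse. Rotating the
  // soundfield by the inverse head rotation keeps sources fixed in world
  // space while the listener turns; the source rotation is applied on top.
  const Eigen::Quaternionf inverse_head_rotation = head_rotation.conjugate();
  return inverse_head_rotation * source_rotation;
}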
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/gain_mixer_node.cc b/src/3rdparty/resonance-audio/resonance_audio/graph/gain_mixer_node.cc
new file mode 100644
index 000000000..26ab2982f
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/gain_mixer_node.cc
@@ -0,0 +1,66 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "graph/gain_mixer_node.h"
+
+#include <vector>
+
+#include "base/constants_and_types.h"
+
+
+namespace vraudio {
+
+GainMixerNode::GainMixerNode(const AttenuationType& attenuation_type,
+ const SystemSettings& system_settings,
+ size_t num_channels)
+ : mute_enabled_(false),
+ attenuation_type_(attenuation_type),
+ gain_mixer_(num_channels, system_settings.GetFramesPerBuffer()),
+ system_settings_(system_settings) {}
+
+void GainMixerNode::SetMute(bool mute_enabled) { mute_enabled_ = mute_enabled; }
+
+bool GainMixerNode::CleanUp() {
+ CallCleanUpOnInputNodes();
+ // Prevent node from being disconnected when all sources are removed.
+ return false;
+}
+
+const AudioBuffer* GainMixerNode::AudioProcess(const NodeInput& input) {
+
+
+ if (mute_enabled_) {
+ // Skip processing and output nullptr audio buffer.
+ return nullptr;
+ }
+
+ // Apply the gain to each input buffer channel.
+ gain_mixer_.Reset();
+ for (auto input_buffer : input.GetInputBuffers()) {
+ const auto source_parameters =
+ system_settings_.GetSourceParameters(input_buffer->source_id());
+ if (source_parameters != nullptr) {
+ const float target_gain =
+ source_parameters->attenuations[attenuation_type_];
+ const size_t num_channels = input_buffer->num_channels();
+ gain_mixer_.AddInput(*input_buffer,
+ std::vector<float>(num_channels, target_gain));
+ }
+ }
+ return gain_mixer_.GetOutput();
+}
+
+} // namespace vraudio
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/gain_mixer_node.h b/src/3rdparty/resonance-audio/resonance_audio/graph/gain_mixer_node.h
new file mode 100644
index 000000000..df569ccdd
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/gain_mixer_node.h
@@ -0,0 +1,68 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#ifndef RESONANCE_AUDIO_GRAPH_GAIN_MIXER_NODE_H_
+#define RESONANCE_AUDIO_GRAPH_GAIN_MIXER_NODE_H_
+
+#include "base/audio_buffer.h"
+#include "base/source_parameters.h"
+#include "dsp/gain_mixer.h"
+#include "graph/system_settings.h"
+#include "node/processing_node.h"
+
+namespace vraudio {
+
+// Node that accepts multiple input buffers, calculates and applies a gain
+// value to each buffer based upon the given |AttenuationType| and then mixes
+// the results together.
+class GainMixerNode : public ProcessingNode {
+ public:
+ // Constructs |GainMixerNode| with given gain calculation method.
+ //
+ // @param attenuation_type Gain attenuation type to be used.
+ // @param system_settings Global system settings.
+ // @param num_channels Number of channels.
+ GainMixerNode(const AttenuationType& attenuation_type,
+ const SystemSettings& system_settings, size_t num_channels);
+
+  // Mutes the mixer node by skipping the audio processing and outputting
+  // nullptr buffers.
+  //
+  // @param mute_enabled Whether to mute the mixer node output.
+ void SetMute(bool mute_enabled);
+
+ // Node implementation.
+ bool CleanUp() final;
+
+ protected:
+ // Implements ProcessingNode.
+ const AudioBuffer* AudioProcess(const NodeInput& input) override;
+
+ private:
+ // Flag indicating the mute status.
+ bool mute_enabled_;
+
+ // Gain attenuation type.
+ const AttenuationType attenuation_type_;
+
+ // Gain mixer.
+ GainMixer gain_mixer_;
+
+ // Global system settings.
+ const SystemSettings& system_settings_;
+};
+
+} // namespace vraudio
+
+#endif // RESONANCE_AUDIO_GRAPH_GAIN_MIXER_NODE_H_
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/gain_mixer_node_test.cc b/src/3rdparty/resonance-audio/resonance_audio/graph/gain_mixer_node_test.cc
new file mode 100644
index 000000000..6e2a98f81
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/gain_mixer_node_test.cc
@@ -0,0 +1,246 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "graph/gain_mixer_node.h"
+
+#include <algorithm>
+#include <limits>
+
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "base/audio_buffer.h"
+#include "base/constants_and_types.h"
+#include "base/logging.h"
+#include "graph/buffered_source_node.h"
+#include "node/sink_node.h"
+#include "node/source_node.h"
+
+namespace vraudio {
+
+namespace {
+
+// Values to initialize a |SystemSettings| instance.
+const size_t kNumFrames = 4;
+const size_t kSampleRate = 44100;
+
+// Helper class to detect deletion.
+class DeletionDetector {
+ public:
+ explicit DeletionDetector(bool* node_deletion_flag)
+ : node_deletion_flag_(node_deletion_flag) {}
+ ~DeletionDetector() {
+ if (node_deletion_flag_ != nullptr) {
+ *node_deletion_flag_ = true;
+ }
+ }
+
+ private:
+ bool* node_deletion_flag_;
+};
+
+// Wraps |SourceNode| to detect its deletion.
+class MySourceNode : public SourceNode, DeletionDetector {
+ public:
+ explicit MySourceNode(bool* node_deletion_flag)
+ : DeletionDetector(node_deletion_flag) {}
+
+ protected:
+ const AudioBuffer* AudioProcess() final { return nullptr; }
+};
+
+// Wraps |GainMixerNode| to detect its deletion.
+class MyGainMixerNode : public GainMixerNode, DeletionDetector {
+ public:
+ MyGainMixerNode(bool* node_deletion_flag,
+ const AttenuationType& attenuation_type,
+ const SystemSettings& system_settings)
+ : GainMixerNode(attenuation_type, system_settings, kNumMonoChannels),
+ DeletionDetector(node_deletion_flag) {}
+};
+
+// Wraps |SinkNode| to detect its deletion.
+class MySinkNode : public SinkNode, DeletionDetector {
+ public:
+ explicit MySinkNode(bool* node_deletion_flag)
+ : DeletionDetector(node_deletion_flag) {}
+};
+
+// Tests that the |GainMixerNode| keeps connected at the moment all of its
+// sources are removed.
+TEST(AudioNodesTest, CleanUpOnEmptyInputTest) {
+ bool source_node_deleted = false;
+ bool gain_mixer_node_deleted = false;
+ bool sink_node_deleted = false;
+
+ SystemSettings system_settings(kNumMonoChannels, kNumFrames, kSampleRate);
+ auto sink_node = std::make_shared<MySinkNode>(&sink_node_deleted);
+
+ {
+ // Create a source and mixer node and connect it to sink node.
+ auto source_node = std::make_shared<MySourceNode>(&source_node_deleted);
+ auto gain_mixer_node = std::make_shared<MyGainMixerNode>(
+ &gain_mixer_node_deleted, AttenuationType::kInput, system_settings);
+
+ // Connect nodes.
+ sink_node->Connect(gain_mixer_node);
+ gain_mixer_node->Connect(source_node);
+
+ // End of stream is marked in source node. Do not expect any data anymore.
+ source_node->MarkEndOfStream();
+ }
+
+ EXPECT_FALSE(source_node_deleted);
+ EXPECT_FALSE(gain_mixer_node_deleted);
+ EXPECT_FALSE(sink_node_deleted);
+
+ sink_node->CleanUp();
+
+ EXPECT_TRUE(source_node_deleted);
+ EXPECT_FALSE(gain_mixer_node_deleted);
+ EXPECT_FALSE(sink_node_deleted);
+}
+
+// Provides unit tests for |GainMixerNode|.
+class GainMixerNodeTest : public ::testing::Test {
+ protected:
+ GainMixerNodeTest()
+ : system_settings_(kNumMonoChannels, kNumFrames, kSampleRate) {}
+
+ // Helper method to create a new input buffer.
+ //
+ // @return Mono audio buffer filled with test data.
+ std::unique_ptr<AudioBuffer> CreateInputBuffer(
+ const std::vector<float>& input_data) {
+ auto buffer = std::unique_ptr<AudioBuffer>(
+ new AudioBuffer(kNumMonoChannels, input_data.size()));
+ (*buffer)[0] = input_data;
+ return buffer;
+ }
+
+  // Helper method that generates a node graph with |num_inputs| buffered
+  // source nodes connected to a |GainMixerNode| and a |SinkNode|.
+  //
+  // @param num_inputs Number of input sources to be created.
+ void CreateGraph(size_t num_inputs) {
+    // Tests will use |AttenuationType::kInput| which directly returns the
+    // local gain value in order to avoid extra complexity.
+ gain_mixer_node_ = std::make_shared<GainMixerNode>(
+ AttenuationType::kInput, system_settings_, kNumMonoChannels);
+
+ output_node_ = std::make_shared<SinkNode>();
+ output_node_->Connect(gain_mixer_node_);
+
+ buffered_source_nodes_.resize(num_inputs);
+ auto parameters_manager = system_settings_.GetSourceParametersManager();
+ for (size_t i = 0; i < num_inputs; ++i) {
+ const auto source_id = static_cast<SourceId>(i);
+ buffered_source_nodes_[i] = std::make_shared<BufferedSourceNode>(
+ source_id, kNumMonoChannels, kNumFrames);
+ gain_mixer_node_->Connect(buffered_source_nodes_[i]);
+ parameters_manager->Register(source_id);
+ }
+ }
+
+  // Processes input buffers with the given gain, returning the mixed output.
+  //
+  // @param input_gain Gain to be applied to each input buffer.
+  // @param input_buffers Buffers to be processed.
+  // @return Processed output buffer.
+ const AudioBuffer* Process(
+ float input_gain, const std::vector<std::vector<float>>& input_buffers) {
+ DCHECK_EQ(buffered_source_nodes_.size(), input_buffers.size());
+ for (size_t i = 0; i < input_buffers.size(); ++i) {
+ const auto source_id = static_cast<SourceId>(i);
+ auto input = CreateInputBuffer(input_buffers[i]);
+ // Set the input gain.
+ auto parameters =
+ system_settings_.GetSourceParametersManager()->GetMutableParameters(
+ source_id);
+ parameters->attenuations[AttenuationType::kInput] = input_gain;
+ // Process the buffer.
+ AudioBuffer* const input_node_buffer =
+ buffered_source_nodes_[i]->GetMutableAudioBufferAndSetNewBufferFlag();
+ *input_node_buffer = *input;
+ }
+ const std::vector<const AudioBuffer*>& outputs = output_node_->ReadInputs();
+ if (!outputs.empty()) {
+ DCHECK_EQ(outputs.size(), 1U);
+ return outputs.front();
+ }
+ return nullptr;
+ }
+
+ // System settings.
+ SystemSettings system_settings_;
+
+ // Component nodes for the simple audio graph.
+ std::shared_ptr<GainMixerNode> gain_mixer_node_;
+ std::vector<std::shared_ptr<BufferedSourceNode>> buffered_source_nodes_;
+ std::shared_ptr<SinkNode> output_node_;
+};
+
+// Tests that the |GainMixerNode| returns the expected output buffers with
+// different gain values.
+TEST_F(GainMixerNodeTest, GainTest) {
+ const float kGain = 0.5f;
+ const std::vector<std::vector<float>> inputs({{1.0f, 1.0f, 1.0f, 1.0f},
+ {2.0f, 2.0f, 2.0f, 2.0f},
+ {3.0f, 3.0f, 3.0f, 3.0f},
+ {4.0f, 4.0f, 4.0f, 4.0f}});
+  // Zero buffer should be returned when the gain value is zero from the start.
+ CreateGraph(inputs.size());
+ auto output = Process(0.0f, inputs);
+ for (size_t i = 0; i < inputs[0].size(); ++i) {
+ EXPECT_NEAR((*output)[0][i], 0.0f, kEpsilonFloat);
+ }
+
+ // A valid output buffer should be returned when the gain value is non-zero.
+ output = Process(kGain, inputs);
+ EXPECT_NEAR((*output)[0][0], 0.0f, kEpsilonFloat);
+ for (size_t i = 1; i < inputs[0].size(); ++i) {
+ EXPECT_FALSE(std::abs((*output)[0][i]) <=
+ std::numeric_limits<float>::epsilon());
+ }
+
+ // Correct values should be returned after gain processor interpolation.
+ for (size_t i = 0; i < kUnitRampLength / 2; ++i) {
+ output = Process(kGain, inputs);
+ }
+ const float output_value =
+ kGain * (inputs[0][0] + inputs[1][0] + inputs[2][0] + inputs[3][0]);
+ for (size_t i = 0; i < inputs[0].size(); ++i) {
+ EXPECT_NEAR((*output)[0][i], output_value, kEpsilonFloat);
+ }
+
+ // A valid output buffer should be returned even when the gain value is zero
+  // during gain processor interpolation.
+ output = Process(0.0f, inputs);
+ for (size_t i = 0; i < inputs[0].size(); ++i) {
+ EXPECT_NE((*output)[0][i], 0.0f);
+ }
+
+ // Zero buffer should be returned after the interpolation is completed.
+ for (size_t i = 0; i < kUnitRampLength / 2; ++i) {
+ output = Process(0.0f, inputs);
+ }
+ for (size_t i = 0; i < inputs[0].size(); ++i) {
+ EXPECT_NEAR((*output)[0][i], 0.0f, kEpsilonFloat);
+ }
+}
+
+} // namespace
+
+} // namespace vraudio
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/gain_node.cc b/src/3rdparty/resonance-audio/resonance_audio/graph/gain_node.cc
new file mode 100644
index 000000000..ca1c8edb2
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/gain_node.cc
@@ -0,0 +1,82 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "graph/gain_node.h"
+
+#include <cmath>
+
+
+#include "dsp/gain.h"
+
+namespace vraudio {
+
+GainNode::GainNode(SourceId source_id, size_t num_channels,
+ const AttenuationType& attenuation_type,
+ const SystemSettings& system_settings)
+ : num_channels_(num_channels),
+ attenuation_type_(attenuation_type),
+ gain_processors_(num_channels_),
+ system_settings_(system_settings),
+ output_buffer_(num_channels, system_settings.GetFramesPerBuffer()) {
+ DCHECK_GT(num_channels, 0U);
+ output_buffer_.set_source_id(source_id);
+}
+
+const AudioBuffer* GainNode::AudioProcess(const NodeInput& input) {
+
+
+ const AudioBuffer* input_buffer = input.GetSingleInput();
+ DCHECK(input_buffer);
+ DCHECK_EQ(input_buffer->num_channels(), num_channels_);
+ DCHECK_EQ(input_buffer->source_id(), output_buffer_.source_id());
+
+ const auto source_parameters =
+ system_settings_.GetSourceParameters(input_buffer->source_id());
+ if (source_parameters == nullptr) {
+ LOG(WARNING) << "Could not find source parameters";
+ return nullptr;
+ }
+
+ const float current_gain = gain_processors_[0].GetGain();
+ const float target_gain = source_parameters->attenuations[attenuation_type_];
+ if (IsGainNearZero(target_gain) && IsGainNearZero(current_gain)) {
+ // Make sure the gain processors are initialized.
+ for (size_t i = 0; i < num_channels_; ++i) {
+ gain_processors_[i].Reset(0.0f);
+ }
+ // Skip processing in case of zero gain.
+ return nullptr;
+ }
+ if (IsGainNearUnity(target_gain) && IsGainNearUnity(current_gain)) {
+ // Make sure the gain processors are initialized.
+ for (size_t i = 0; i < num_channels_; ++i) {
+ gain_processors_[i].Reset(1.0f);
+ }
+ // Skip processing in case of unity gain.
+ return input_buffer;
+ }
+
+ // Apply the gain to each input buffer channel.
+ for (size_t i = 0; i < num_channels_; ++i) {
+ gain_processors_[i].ApplyGain(target_gain, (*input_buffer)[i],
+ &output_buffer_[i],
+ false /* accumulate_output */);
+ }
+
+ return &output_buffer_;
+}
+
+} // namespace vraudio
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/gain_node.h b/src/3rdparty/resonance-audio/resonance_audio/graph/gain_node.h
new file mode 100644
index 000000000..35f8c62b8
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/gain_node.h
@@ -0,0 +1,69 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#ifndef RESONANCE_AUDIO_GRAPH_GAIN_NODE_H_
+#define RESONANCE_AUDIO_GRAPH_GAIN_NODE_H_
+
+#include <memory>
+#include <vector>
+
+#include "base/audio_buffer.h"
+#include "base/constants_and_types.h"
+#include "base/source_parameters.h"
+#include "dsp/gain_processor.h"
+#include "graph/system_settings.h"
+#include "node/processing_node.h"
+
+namespace vraudio {
+
+// Node that calculates and applies a gain value to each channel of an input
+// buffer based upon the given |AttenuationType|.
+class GainNode : public ProcessingNode {
+ public:
+ // Constructs |GainNode| with given gain attenuation method.
+ //
+ // @param source_id Output buffer source id.
+ // @param num_channels Number of channels in the input buffer.
+ // @param attenuation_type Gain attenuation type to be used.
+ // @param system_settings Global system settings.
+ GainNode(SourceId source_id, size_t num_channels,
+ const AttenuationType& attenuation_type,
+ const SystemSettings& system_settings);
+
+ protected:
+ // Implements ProcessingNode.
+ const AudioBuffer* AudioProcess(const NodeInput& input) override;
+
+ private:
+ // Number of channels of the audio buffer.
+ const size_t num_channels_;
+
+ // Gain attenuation type.
+ const AttenuationType attenuation_type_;
+
+ // Gain processors per each channel.
+ std::vector<GainProcessor> gain_processors_;
+
+ // Global system settings.
+ const SystemSettings& system_settings_;
+
+ // Output buffer.
+ AudioBuffer output_buffer_;
+};
+
+} // namespace vraudio
+
+#endif // RESONANCE_AUDIO_GRAPH_GAIN_NODE_H_
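
A standalone sketch (not part of the patch) of the gain short-circuiting that |GainNode::AudioProcess| performs: output nullptr when both current and target gain are near zero, pass the input buffer through near unity gain, and otherwise ramp via the gain processors. The threshold here is an assumed value for illustration; the real checks are IsGainNearZero()/IsGainNearUnity() from dsp/gain.h.

#include <cmath>

enum class GainPath { kSkip, kPassThrough, kApplyRamp };

GainPath ChooseGainPath(float current_gain, float target_gain) {
  const float kGainEpsilon = 1e-3f;  // Assumed threshold, illustration only.
  const bool near_zero = std::abs(target_gain) < kGainEpsilon &&
                         std::abs(current_gain) < kGainEpsilon;
  const bool near_unity = std::abs(target_gain - 1.0f) < kGainEpsilon &&
                          std::abs(current_gain - 1.0f) < kGainEpsilon;
  if (near_zero) return GainPath::kSkip;          // Node outputs nullptr.
  if (near_unity) return GainPath::kPassThrough;  // Input buffer is reused.
  return GainPath::kApplyRamp;                    // GainProcessor ramps gain.
}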
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/gain_node_test.cc b/src/3rdparty/resonance-audio/resonance_audio/graph/gain_node_test.cc
new file mode 100644
index 000000000..97391f876
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/gain_node_test.cc
@@ -0,0 +1,138 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "graph/gain_node.h"
+
+#include <iterator>
+#include <memory>
+#include <vector>
+
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "base/audio_buffer.h"
+#include "base/constants_and_types.h"
+#include "graph/buffered_source_node.h"
+#include "node/sink_node.h"
+
+namespace vraudio {
+
+namespace {
+
+// Values to initialize a |SystemSettings| instance.
+const size_t kNumFrames = 4;
+const size_t kSampleRate = 44100;
+
+// Source id.
+const SourceId kSourceId = 1;
+
+const float kInputData[kNumFrames] = {1.0f, 2.0f, 3.0f, 4.0f};
+
+// Provides unit tests for |GainNode|.
+class GainNodeTest : public ::testing::Test {
+ protected:
+ GainNodeTest()
+ : input_data_(std::begin(kInputData), std::end(kInputData)),
+ system_settings_(kNumMonoChannels, kNumFrames, kSampleRate) {}
+
+ void SetUp() override {
+ // Tests will use |AttenuationType::kInput| which directly returns the input
+ // gain value in order to avoid extra complexity.
+ gain_node_ = std::make_shared<GainNode>(
+ kSourceId, kNumMonoChannels, AttenuationType::kInput, system_settings_);
+ input_buffer_node_ = std::make_shared<BufferedSourceNode>(
+ kSourceId, kNumMonoChannels, kNumFrames);
+ gain_node_->Connect(input_buffer_node_);
+ output_node_ = std::make_shared<SinkNode>();
+ output_node_->Connect(gain_node_);
+ // Register the source parameters.
+ system_settings_.GetSourceParametersManager()->Register(kSourceId);
+ }
+
+ // Helper method to create a new input buffer.
+ //
+ // @return Mono audio buffer filled with test data.
+ std::unique_ptr<AudioBuffer> CreateInputBuffer() {
+ std::unique_ptr<AudioBuffer> buffer(
+ new AudioBuffer(kNumMonoChannels, kNumFrames));
+ (*buffer)[0] = input_data_;
+ return buffer;
+ }
+
+  // Helper method that runs the node graph with the given input gain and
+  // returns the processed output.
+  //
+  // @param input_gain Input gain value to be processed.
+  // @return Processed output buffer.
+  const AudioBuffer* ProcessGainNode(float input_gain) {
+ // Create a new audio buffer.
+ auto input = CreateInputBuffer();
+ // Update the input gain parameter.
+ auto parameters =
+ system_settings_.GetSourceParametersManager()->GetMutableParameters(
+ kSourceId);
+ parameters->attenuations[AttenuationType::kInput] = input_gain;
+ // Process the buffer.
+ AudioBuffer* const input_node_buffer =
+ input_buffer_node_->GetMutableAudioBufferAndSetNewBufferFlag();
+ *input_node_buffer = *input;
+
+ const std::vector<const AudioBuffer*>& outputs = output_node_->ReadInputs();
+ if (!outputs.empty()) {
+ DCHECK_EQ(outputs.size(), 1U);
+ return outputs.front();
+ }
+ return nullptr;
+ }
+
+ private:
+ std::vector<float> input_data_;
+
+ std::shared_ptr<GainNode> gain_node_;
+ std::shared_ptr<BufferedSourceNode> input_buffer_node_;
+ std::shared_ptr<SinkNode> output_node_;
+
+ SystemSettings system_settings_;
+};
+
+// Tests that the gain node returns the expected output buffers with different
+// gain values.
+TEST_F(GainNodeTest, GainTest) {
+ // nullptr should be returned when the gain value is zero from the start.
+ auto output = ProcessGainNode(0.0f);
+
+ EXPECT_TRUE(output == nullptr);
+
+ // A valid output buffer should be returned when the gain value is non-zero.
+ output = ProcessGainNode(0.5f);
+
+ EXPECT_FALSE(output == nullptr);
+
+ // A valid output buffer should be returned even when the gain value is zero
+  // during gain processor interpolation.
+ output = ProcessGainNode(0.0f);
+
+ EXPECT_FALSE(output == nullptr);
+
+ // nullptr should be returned after the interpolation is completed.
+ for (size_t i = 0; i < kUnitRampLength; ++i) {
+ output = ProcessGainNode(0.0f);
+ }
+
+ EXPECT_TRUE(output == nullptr);
+}
+
+} // namespace
+
+} // namespace vraudio
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/graph_manager.cc b/src/3rdparty/resonance-audio/resonance_audio/graph/graph_manager.cc
new file mode 100644
index 000000000..cb5b85590
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/graph_manager.cc
@@ -0,0 +1,290 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "graph/graph_manager.h"
+
+#include <functional>
+
+#include "ambisonics/utils.h"
+#include "base/constants_and_types.h"
+#include "base/logging.h"
+#include "graph/foa_rotator_node.h"
+#include "graph/gain_node.h"
+#include "graph/hoa_rotator_node.h"
+#include "graph/mono_from_soundfield_node.h"
+#include "graph/near_field_effect_node.h"
+#include "graph/occlusion_node.h"
+
+namespace vraudio {
+
+GraphManager::GraphManager(const SystemSettings& system_settings)
+    : room_effects_enabled_(true),
+ config_(GlobalConfig()),
+ system_settings_(system_settings),
+ fft_manager_(system_settings.GetFramesPerBuffer()),
+ output_node_(std::make_shared<SinkNode>()) {
+ CHECK_LE(system_settings.GetFramesPerBuffer(), kMaxSupportedNumFrames);
+
+ stereo_mixer_node_ =
+ std::make_shared<MixerNode>(system_settings_, kNumStereoChannels);
+ output_node_->Connect(stereo_mixer_node_);
+
+  // Initialize the Ambisonic lookup table.
+ lookup_table_.reset(new AmbisonicLookupTable(config_.max_ambisonic_order));
+ // Initialize the Ambisonic Renderer subgraphs.
+ for (const auto& sh_hrir_filename_itr : config_.sh_hrir_filenames) {
+ const int ambisonic_order = sh_hrir_filename_itr.first;
+ const auto& sh_hrir_filename = sh_hrir_filename_itr.second;
+ InitializeAmbisonicRendererGraph(ambisonic_order, sh_hrir_filename);
+ // Initialize the Ambisonic Mixing Encoders for HRTF sound object rendering.
+ ambisonic_mixing_encoder_nodes_[ambisonic_order] =
+ std::make_shared<AmbisonicMixingEncoderNode>(
+ system_settings_, *lookup_table_, ambisonic_order);
+ ambisonic_mixer_nodes_[ambisonic_order]->Connect(
+ ambisonic_mixing_encoder_nodes_[ambisonic_order]);
+ }
+
+ // Stereo mixing panner node used in non-HRTF sound object rendering.
+ stereo_mixing_panner_node_ =
+ std::make_shared<StereoMixingPannerNode>(system_settings_);
+ stereo_mixer_node_->Connect(stereo_mixing_panner_node_);
+
+ // Initialize room effects graphs.
+ InitializeReflectionsGraph();
+ InitializeReverbGraph();
+ // Initialize ambisonic output mixer.
+ ambisonic_output_mixer_.reset(
+ new Mixer(GetNumPeriphonicComponents(config_.max_ambisonic_order),
+ system_settings.GetFramesPerBuffer()));
+}
+
+void GraphManager::CreateAmbisonicSource(SourceId ambisonic_source_id,
+ size_t num_channels) {
+ DCHECK(source_nodes_.find(ambisonic_source_id) == source_nodes_.end());
+ // Create a new |ambisonic_source_node| and register to |source_nodes_|.
+ auto ambisonic_source_node = std::make_shared<BufferedSourceNode>(
+ ambisonic_source_id, num_channels, system_settings_.GetFramesPerBuffer());
+ source_nodes_[ambisonic_source_id] = ambisonic_source_node;
+
+ // Connect |ambisonic_source_node| to the ambisonic decoding pipeline.
+ const int ambisonic_order = GetPeriphonicAmbisonicOrder(num_channels);
+ auto direct_attenuation_node =
+ std::make_shared<GainNode>(ambisonic_source_id, num_channels,
+ AttenuationType::kDirect, system_settings_);
+ direct_attenuation_node->Connect(ambisonic_source_node);
+ if (ambisonic_order == 1) {
+ // First order case.
+ auto foa_rotator_node =
+ std::make_shared<FoaRotatorNode>(ambisonic_source_id, system_settings_);
+ foa_rotator_node->Connect(direct_attenuation_node);
+ ambisonic_mixer_nodes_[ambisonic_order]->Connect(foa_rotator_node);
+ } else {
+ // Higher orders case.
+ auto hoa_rotator_node = std::make_shared<HoaRotatorNode>(
+ ambisonic_source_id, system_settings_, ambisonic_order);
+ hoa_rotator_node->Connect(direct_attenuation_node);
+ ambisonic_mixer_nodes_[ambisonic_order]->Connect(hoa_rotator_node);
+ }
+ // Connect to room effects rendering pipeline.
+ auto mono_from_soundfield_node = std::make_shared<MonoFromSoundfieldNode>(
+ ambisonic_source_id, system_settings_);
+ mono_from_soundfield_node->Connect(ambisonic_source_node);
+ reflections_gain_mixer_node_->Connect(mono_from_soundfield_node);
+ reverb_gain_mixer_node_->Connect(mono_from_soundfield_node);
+}
+
+void GraphManager::CreateSoundObjectSource(SourceId sound_object_source_id,
+ int ambisonic_order,
+ bool enable_hrtf,
+ bool enable_direct_rendering) {
+ DCHECK(source_nodes_.find(sound_object_source_id) == source_nodes_.end());
+ // Create a new |sound_object_source_node| and register to |source_nodes_|.
+ auto sound_object_source_node = std::make_shared<BufferedSourceNode>(
+ sound_object_source_id, kNumMonoChannels,
+ system_settings_.GetFramesPerBuffer());
+ source_nodes_[sound_object_source_id] = sound_object_source_node;
+
+ // Create direct rendering pipeline.
+ if (enable_direct_rendering) {
+ auto direct_attenuation_node =
+ std::make_shared<GainNode>(sound_object_source_id, kNumMonoChannels,
+ AttenuationType::kDirect, system_settings_);
+ direct_attenuation_node->Connect(sound_object_source_node);
+ auto occlusion_node = std::make_shared<OcclusionNode>(
+ sound_object_source_id, system_settings_);
+ occlusion_node->Connect(direct_attenuation_node);
+ auto near_field_effect_node = std::make_shared<NearFieldEffectNode>(
+ sound_object_source_id, system_settings_);
+
+ if (enable_hrtf) {
+ ambisonic_mixing_encoder_nodes_[ambisonic_order]->Connect(occlusion_node);
+ } else {
+ stereo_mixing_panner_node_->Connect(occlusion_node);
+ }
+
+ near_field_effect_node->Connect(occlusion_node);
+ stereo_mixer_node_->Connect(near_field_effect_node);
+ }
+
+ // Connect to room effects rendering pipeline.
+ reflections_gain_mixer_node_->Connect(sound_object_source_node);
+ reverb_gain_mixer_node_->Connect(sound_object_source_node);
+}
+
+void GraphManager::EnableRoomEffects(bool enable) {
+ room_effects_enabled_ = enable;
+ reflections_gain_mixer_node_->SetMute(!room_effects_enabled_);
+ reverb_gain_mixer_node_->SetMute(!room_effects_enabled_);
+}
+
+const AudioBuffer* GraphManager::GetAmbisonicBuffer() const {
+ ambisonic_output_mixer_->Reset();
+ for (const auto& ambisonic_mixer_node_itr : ambisonic_mixer_nodes_) {
+ const auto* ambisonic_buffer =
+ ambisonic_mixer_node_itr.second->GetOutputBuffer();
+ if (ambisonic_buffer != nullptr) {
+ ambisonic_output_mixer_->AddInput(*ambisonic_buffer);
+ }
+ }
+ return ambisonic_output_mixer_->GetOutput();
+}
+
+const AudioBuffer* GraphManager::GetStereoBuffer() const {
+ return stereo_mixer_node_->GetOutputBuffer();
+}
+
+const AudioBuffer* GraphManager::GetReverbBuffer() const {
+ return reverb_node_->GetOutputBuffer();
+}
+
+size_t GraphManager::GetNumMaxAmbisonicChannels() const {
+ return GetNumPeriphonicComponents(config_.max_ambisonic_order);
+}
+
+bool GraphManager::GetRoomEffectsEnabled() const {
+ return room_effects_enabled_;
+}
+
+void GraphManager::UpdateRoomReflections() { reflections_node_->Update(); }
+
+void GraphManager::UpdateRoomReverb() { reverb_node_->Update(); }
+
+void GraphManager::InitializeReverbGraph() {
+ reverb_gain_mixer_node_ = std::make_shared<GainMixerNode>(
+ AttenuationType::kReverb, system_settings_, kNumMonoChannels);
+ reverb_node_ = std::make_shared<ReverbNode>(system_settings_, &fft_manager_);
+ reverb_node_->Connect(reverb_gain_mixer_node_);
+ stereo_mixer_node_->Connect(reverb_node_);
+}
+
+void GraphManager::InitializeReflectionsGraph() {
+ reflections_gain_mixer_node_ = std::make_shared<GainMixerNode>(
+ AttenuationType::kReflections, system_settings_, kNumMonoChannels);
+ reflections_node_ = std::make_shared<ReflectionsNode>(system_settings_);
+ reflections_node_->Connect(reflections_gain_mixer_node_);
+ // Reflections are limited to First Order Ambisonics to reduce complexity.
+ const int kAmbisonicOrder1 = 1;
+ ambisonic_mixer_nodes_[kAmbisonicOrder1]->Connect(reflections_node_);
+}
+
+void GraphManager::CreateAmbisonicPannerSource(SourceId sound_object_source_id,
+ bool enable_hrtf) {
+ DCHECK(source_nodes_.find(sound_object_source_id) == source_nodes_.end());
+ // Create a new |sound_object_source_node| and register to |source_nodes_|.
+ auto sound_object_source_node = std::make_shared<BufferedSourceNode>(
+ sound_object_source_id, kNumMonoChannels,
+ system_settings_.GetFramesPerBuffer());
+ source_nodes_[sound_object_source_id] = sound_object_source_node;
+
+ if (enable_hrtf) {
+ ambisonic_mixing_encoder_nodes_[config_.max_ambisonic_order]->Connect(
+ sound_object_source_node);
+ } else {
+ stereo_mixing_panner_node_->Connect(sound_object_source_node);
+ }
+}
+
+void GraphManager::CreateStereoSource(SourceId stereo_source_id) {
+ DCHECK(source_nodes_.find(stereo_source_id) == source_nodes_.end());
+ // Create a new |stereo_source_node| and register to |source_nodes_|.
+ auto stereo_source_node = std::make_shared<BufferedSourceNode>(
+ stereo_source_id, kNumStereoChannels,
+ system_settings_.GetFramesPerBuffer());
+ source_nodes_[stereo_source_id] = stereo_source_node;
+
+ // Connect |stereo_source_node| to the stereo rendering pipeline.
+ auto gain_node =
+ std::make_shared<GainNode>(stereo_source_id, kNumStereoChannels,
+ AttenuationType::kInput, system_settings_);
+ gain_node->Connect(stereo_source_node);
+ stereo_mixer_node_->Connect(gain_node);
+}
+
+void GraphManager::DestroySource(SourceId source_id) {
+ auto source_node = LookupSourceNode(source_id);
+ if (source_node != nullptr) {
+ // Disconnect the source from the graph.
+ source_node->MarkEndOfStream();
+ output_node_->CleanUp();
+ // Unregister the source from |source_nodes_|.
+ source_nodes_.erase(source_id);
+ }
+}
+
+std::shared_ptr<SinkNode> GraphManager::GetSinkNode() { return output_node_; }
+
+void GraphManager::Process() {
+
+ output_node_->ReadInputs();
+}
+
+AudioBuffer* GraphManager::GetMutableAudioBuffer(SourceId source_id) {
+ auto source_node = LookupSourceNode(source_id);
+ if (source_node == nullptr) {
+ return nullptr;
+ }
+ return source_node->GetMutableAudioBufferAndSetNewBufferFlag();
+}
+
+void GraphManager::InitializeAmbisonicRendererGraph(
+ int ambisonic_order, const std::string& sh_hrir_filename) {
+ CHECK_LE(ambisonic_order, config_.max_ambisonic_order);
+ const size_t num_channels = GetNumPeriphonicComponents(ambisonic_order);
+ // Create binaural decoder pipeline.
+ ambisonic_mixer_nodes_[ambisonic_order] =
+ std::make_shared<MixerNode>(system_settings_, num_channels);
+ auto ambisonic_binaural_decoder_node =
+ std::make_shared<AmbisonicBinauralDecoderNode>(
+ system_settings_, ambisonic_order, sh_hrir_filename, &fft_manager_,
+ &resampler_);
+ ambisonic_binaural_decoder_node->Connect(
+ ambisonic_mixer_nodes_[ambisonic_order]);
+ stereo_mixer_node_->Connect(ambisonic_binaural_decoder_node);
+}
+
+std::shared_ptr<BufferedSourceNode> GraphManager::LookupSourceNode(
+ SourceId source_id) {
+ auto source_node_itr = source_nodes_.find(source_id);
+ if (source_node_itr == source_nodes_.end()) {
+ LOG(WARNING) << "Source node " << source_id << " not found";
+ return nullptr;
+ }
+ return source_node_itr->second;
+}
+
+} // namespace vraudio
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/graph_manager.h b/src/3rdparty/resonance-audio/resonance_audio/graph/graph_manager.h
new file mode 100644
index 000000000..818b6fa01
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/graph_manager.h
@@ -0,0 +1,404 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#ifndef RESONANCE_AUDIO_GRAPH_GRAPH_MANAGER_H_
+#define RESONANCE_AUDIO_GRAPH_GRAPH_MANAGER_H_
+
+#include <memory>
+#include <string>
+#include <unordered_map>
+
+#include "ambisonics/ambisonic_lookup_table.h"
+#include "base/audio_buffer.h"
+#include "base/constants_and_types.h"
+#include "config/global_config.h"
+#include "dsp/fft_manager.h"
+#include "dsp/resampler.h"
+#include "graph/ambisonic_binaural_decoder_node.h"
+#include "graph/ambisonic_mixing_encoder_node.h"
+#include "graph/buffered_source_node.h"
+#include "graph/gain_mixer_node.h"
+#include "graph/mixer_node.h"
+#include "graph/reflections_node.h"
+#include "graph/reverb_node.h"
+#include "graph/stereo_mixing_panner_node.h"
+#include "graph/system_settings.h"
+#include "node/sink_node.h"
+
+namespace vraudio {
+
+// The GraphManager class manages the construction and lifetime of the audio
+// processing graph. It owns the output node that connects the audio processing
+// graph to the audio hardware.
+class GraphManager {
+ public:
+ // Initializes GraphManager class.
+ //
+ // @param system_settings Global system configuration.
+ explicit GraphManager(const SystemSettings& system_settings);
+
+ // Returns the sink node the audio graph is connected to.
+ //
+ // @return Shared pointer of the sink node.
+ std::shared_ptr<SinkNode> GetSinkNode();
+
+ // Triggers processing of the audio graph for all the connected nodes.
+ void Process();
+
+ // Returns a mutable pointer to the |AudioBuffer| of an audio source with
+ // given |source_id|. Calls to this method must be synchronized with the audio
+ // graph processing.
+ //
+ // @param source_id Source id.
+ // @return Mutable audio buffer pointer. Nullptr if source_id not found.
+ AudioBuffer* GetMutableAudioBuffer(SourceId source_id);
+
+ // Creates an ambisonic panner source with given |sound_object_source_id|.
+ //
+ // Processing graph:
+ //
+ // +-------------------+
+ // | |
+ // +------------+ SoundObjectSource +----------+
+ // | | | |
+ // | +---------+---------+ |
+ // | |
+ // +-----------v-----------+ +---------v----------+
+ // | | | |
+ // | AmbisonicMixingPanner | | StereoMixingPanner |
+ // | | | |
+ // +-----------+-----------+ +---------+----------+
+ // | |
+ // +-----------v-----------+ +---------v----------+
+ // | | | |
+ // | AmbisonicMixer | | StereoMixer |
+ // | | | |
+ // +-----------+-----------+ +--------------------+
+ //
+ //
+ // @param sound_object_source_id Id of sound object source.
+ // @param enable_hrtf Flag to enable HRTF-based spatialization.
+ void CreateAmbisonicPannerSource(SourceId sound_object_source_id,
+ bool enable_hrtf);
+
+ // Creates a new stereo non-spatialized source with given |stereo_source_id|.
+ //
+ // Processing graph:
+ //
+ // +--------------+
+ // | |
+ // | StereoSource |
+ // | |
+ // +-------+------+
+ // |
+ // +-------v------+
+ // | |
+ // | Gain |
+ // | |
+ // +-------+------+
+ // |
+ // +-------V------+
+ // | |
+ // | StereoMixer |
+ // | |
+ // +--------------+
+ //
+ // @param stereo_source_id Id of new stereo source.
+ void CreateStereoSource(SourceId stereo_source_id);
+
+ // Destroys source with given |source_id|. Note that this call only sets a
+ // flag to indicate that this source can be removed. The actual disconnect
+ // happens from the audio processing thread the next time the processing graph
+ // is triggered.
+ //
+ // @param source_id Id of source to be destroyed.
+ void DestroySource(SourceId source_id);
+
+ // Creates a new ambisonic source subgraph with given |ambisonic_source_id|.
+ // Note: Ambisonic source subgraph is only created if the rendering mode is
+ // HRTF.
+ //
+ // Processing graph (all the graphs created using http://asciiflow.com/):
+ //
+ // +-----------------+
+ // | |
+ // +-------+ AmbisonicSource +-------+
+ // | | | |
+ // | +-----------------+ |
+ // | |
+ // +----v---+ +----------v---------+
+ // | | | |
+ // | Gain | +--+ MonoFromSoundfield +--+
+ // | | | | | |
+ // +----+---+ | +--------------------+ |
+ // | | |
+ // | | |
+ // +--------v-------+ +--------v---------+ +------v------+
+ // | | | | | |
+ // | Foa/HoaRotator | | ReflectionsMixer | | ReverbMixer |
+ // | | | | | |
+ // +--------+-------+ +--------+---------+ +------+------+
+ // |
+ // +--------v-------+
+ // | |
+ // | AmbisonicMixer |
+ // | |
+ // +--------+-------+
+ //
+ // @param ambisonic_source_id Id of new ambisonic source.
+ // @param num_channels Number of input channels of ambisonic source node.
+ void CreateAmbisonicSource(SourceId ambisonic_source_id, size_t num_channels);
+
+ // Creates a new sound object source with given |sound_object_source_id|.
+ //
+ // Processing graph:
+ //
+ // +-------------------+
+ // | |
+ // +-------------+ SoundObjectSource +----------+
+ // | | | |
+ // | +---------+---------+ |
+ // | | |
+ // +----------v-----------+ +---------v---------+ +--------v--------+
+ // | | | | | |
+ // | ReflectionsGainMixer | | DirectAttenuation | | ReverbGainMixer |
+ // | | | | | |
+ // +----------+-----------+ +---------+---------+ +--------+--------+
+ // |
+ // +---------v---------+
+ // HRTF | | Stereo Panning
+ // +------------+ Occlusion +----------+
+ // | | | |
+ // | +---------+---------+ |
+ // | | |
+ // +-----------v-----------+ +--------v--------+ +---------v----------+
+ // | | | | | |
+ // | AmbisonicMixingPanner | | NearFieldEffect | | StereoMixingPanner |
+ // | | | | | |
+ // +-----------+-----------+ +--------+--------+ +---------+----------+
+ // | | |
+ // +-----------v-----------+ +--------v--------+ |
+ // | | | | |
+ // | AmbisonicMixer | | StereoMixer <-----------+
+ // | | | |
+ // +-----------+-----------+ +-----------------+
+ //
+ //
+ // @param sound_object_source_id Id of sound object source.
+ // @param ambisonic_order Ambisonic order to encode the sound object source.
+ // @param enable_hrtf Flag to enable HRTF-based rendering.
+ // @param enable_direct_rendering Flag to enable direct source rendering.
+ void CreateSoundObjectSource(SourceId sound_object_source_id,
+ int ambisonic_order, bool enable_hrtf,
+ bool enable_direct_rendering);
+
+  // Mutes or unmutes the room effects mixers.
+  //
+  // @param enable Whether to enable room effects.
+ void EnableRoomEffects(bool enable);
+
+ // Returns the last processed output audio buffer of the ambisonic mix with
+  // the highest possible ambisonic channel configuration. Note that this
+ // method will *not* trigger the processing of the audio graph.
+ // |GraphManager::Process| must be called prior to this method call to ensure
+ // that the output buffer is up-to-date.
+ //
+ // @return Output audio buffer of the ambisonic mix, or nullptr if no output.
+ const AudioBuffer* GetAmbisonicBuffer() const;
+
+ // Returns the last processed output audio buffer of the stereo (binaural)
+  // mix. Note that this method will *not* trigger the processing of the audio
+ // graph. |GraphManager::Process| must be called prior to this method call to
+ // ensure that the output buffer is up-to-date.
+ //
+ // @return Output audio buffer of the stereo mix, or nullptr if no output.
+ const AudioBuffer* GetStereoBuffer() const;
+
+ // Returns the last processed buffer containing the reverb data for the room.
+ // The buffer contains stereo data.
+  // Note that this method will *not* trigger the processing of the audio
+ // graph. |GraphManager::Process| must be called prior to this method call to
+ // ensure that the buffer is up-to-date.
+ //
+ // @return Room reverb audio buffer, or nullptr if no output.
+ const AudioBuffer* GetReverbBuffer() const;
+
+ // Returns the maximum allowed number of ambisonic channels.
+ //
+ // @return Number of channels based on Ambisonic order in the global config.
+ size_t GetNumMaxAmbisonicChannels() const;
+
+ // Returns whether the room effects graph is enabled.
+ //
+ // @return True if room effects are enabled.
+ bool GetRoomEffectsEnabled() const;
+
+ // Updates the room reflections with the current properties for room effects
+ // processing.
+ void UpdateRoomReflections();
+
+ // Updates the room reverb.
+ void UpdateRoomReverb();
+
+ private:
+  // Initializes the Ambisonic renderer subgraph for the specified Ambisonic
+ // order and connects it to the |StereoMixerNode|.
+ //
+ // Processing graph:
+ //
+ // +------------------+
+ // | |
+ // | AmbisonicMixer |
+ // | |
+ // +--------+---------+
+ // |
+ // |
+ // +------------v-------------+
+ // | |
+ // | AmbisonicBinauralDecoder |
+ // | |
+ // +------------+-------------+
+ // |
+ // |
+ // +-----------v------------+
+ // | |
+ // | StereoMixer |
+ // | |
+ // +------------------------+
+ //
+ // @param ambisonic_order Ambisonic order.
+ // @param sh_hrir_filename Filename to load the HRIR data from.
+ void InitializeAmbisonicRendererGraph(int ambisonic_order,
+ const std::string& sh_hrir_filename);
+
+ // Helper method to lookup a source node with given |source_id|.
+ //
+ // @param source_id Source id.
+ // @returns Shared pointer to source node instance, nullptr if not found.
+ std::shared_ptr<BufferedSourceNode> LookupSourceNode(SourceId source_id);
+
+ // Creates an audio subgraph that renders early reflections based on a room
+ // model on a single mix.
+ //
+ // Processing graph:
+ //
+ // +---------------------------+
+ // | |
+ // | ReflectionsGainMixer |
+ // | |
+ // +-------------+-------------+
+ // |
+ // +----------v----------+
+ // | |
+ // | Reflections |
+ // | |
+ // +----------+----------+
+ // |
+ // +----------v----------+
+ // | |
+ // | AmbisonicMixer |
+ // | |
+ // +----------+----------+
+ //
+ void InitializeReflectionsGraph();
+
+ // Creates an audio subgraph that renders a reverb from a mono mix of all the
+ // sound objects based on a room model.
+ //
+ // Processing graph:
+ //
+ // +-----------------+
+ // | |
+ // | ReverbGainMixer |
+ // | |
+ // +--------+--------+
+ // |
+ // +--------v--------+
+ // | |
+ // | Reverb |
+ // | |
+ // +--------+--------+
+ // |
+ // +--------v--------+
+ // | |
+ // | StereoMixer |
+ // | |
+ // +-----------------+
+ //
+ void InitializeReverbGraph();
+
+ // Flag indicating if room effects are enabled.
+ bool room_effects_enabled_;
+
+ // Mono mixer to accumulate all reverb sources.
+ std::shared_ptr<GainMixerNode> reverb_gain_mixer_node_;
+
+ // Reflections node.
+ std::shared_ptr<ReflectionsNode> reflections_node_;
+
+ // Mono mixer node to accumulate the early reflection sources.
+ std::shared_ptr<GainMixerNode> reflections_gain_mixer_node_;
+
+ // Reverb node.
+ std::shared_ptr<ReverbNode> reverb_node_;
+
+ // Ambisonic output mixer to accumulate incoming ambisonic inputs into a
+ // single ambisonic output buffer.
+ std::unique_ptr<Mixer> ambisonic_output_mixer_;
+
+ // Global config passed in during construction.
+ const GraphManagerConfig config_;
+
+ // Manages system wide settings.
+ const SystemSettings& system_settings_;
+
+ // Provides Ambisonic encoding coefficients.
+ std::unique_ptr<AmbisonicLookupTable> lookup_table_;
+
+ // |FftManager| to be used in nodes that require FFT transformations.
+ FftManager fft_manager_;
+
+ // |Resampler| to be used to convert HRIRs to the system sample rate.
+ Resampler resampler_;
+
+  // Ambisonic mixer nodes for each ambisonic order to accumulate the
+ // ambisonic sources for the corresponding binaural Ambisonic decoders.
+ std::unordered_map<int, std::shared_ptr<MixerNode>> ambisonic_mixer_nodes_;
+
+ // Stereo mixer to combine all the stereo and binaural output.
+ std::shared_ptr<MixerNode> stereo_mixer_node_;
+
+  // Ambisonic mixing encoder nodes per ambisonic order to apply encoding
+  // coefficients and accumulate the Ambisonic buffers.
+ std::unordered_map<int, std::shared_ptr<AmbisonicMixingEncoderNode>>
+ ambisonic_mixing_encoder_nodes_;
+
+ // Stereo mixing panner node to apply stereo panning gains and accumulate the
+ // buffers.
+ std::shared_ptr<StereoMixingPannerNode> stereo_mixing_panner_node_;
+
+ // Output node that enables audio playback of a single audio stream.
+ std::shared_ptr<SinkNode> output_node_;
+
+ // Holds all registered source nodes (independently of their type) and
+ // allows look up by id.
+ std::unordered_map<SourceId, std::shared_ptr<BufferedSourceNode>>
+ source_nodes_;
+};
+
+} // namespace vraudio
+
+#endif // RESONANCE_AUDIO_GRAPH_GRAPH_MANAGER_H_
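
A usage sketch (not part of the patch) that ties the GraphManager API above together: create a sound object source, write a block of mono samples, process the graph, and read the binaural output. The |SystemSettings| constructor argument order (channels, frames per buffer, sample rate) follows the tests in this patch; registering the source parameters before processing also mirrors the tests.

#include "graph/graph_manager.h"
#include "graph/system_settings.h"

void RenderOneBlock() {
  vraudio::SystemSettings settings(vraudio::kNumStereoChannels,
                                   /*frames_per_buffer=*/512,
                                   /*sample_rate_hz=*/48000);
  vraudio::GraphManager graph_manager(settings);

  const vraudio::SourceId source_id = 1;
  // Register source parameters so attenuation lookups succeed (as the tests do).
  settings.GetSourceParametersManager()->Register(source_id);
  graph_manager.CreateSoundObjectSource(source_id, /*ambisonic_order=*/1,
                                        /*enable_hrtf=*/true,
                                        /*enable_direct_rendering=*/true);

  // Write input samples for this block (synchronized with Process()).
  vraudio::AudioBuffer* input = graph_manager.GetMutableAudioBuffer(source_id);
  if (input != nullptr) {
    (*input)[0][0] = 1.0f;  // e.g. an impulse in the mono channel.
  }

  graph_manager.Process();
  const vraudio::AudioBuffer* stereo_out = graph_manager.GetStereoBuffer();
  (void)stereo_out;  // Interleave/copy to the output device here.
}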
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/graph_manager_config.h b/src/3rdparty/resonance-audio/resonance_audio/graph/graph_manager_config.h
new file mode 100644
index 000000000..f3c310a06
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/graph_manager_config.h
@@ -0,0 +1,40 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#ifndef RESONANCE_AUDIO_GRAPH_GRAPH_MANAGER_CONFIG_H_
+#define RESONANCE_AUDIO_GRAPH_GRAPH_MANAGER_CONFIG_H_
+
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace vraudio {
+
+// Configuration of the GraphManager and the nodes it is instantiating.
+struct GraphManagerConfig {
+ // Configuration name.
+ std::string configuration_name;
+
+ // Maximum ambisonic order allowed.
+ int max_ambisonic_order = 1;
+
+ // HRIR filenames (second element) per ambisonic order (first element).
+ std::vector<std::pair<int, std::string>> sh_hrir_filenames = {};
+};
+
+} // namespace vraudio
+
+#endif // RESONANCE_AUDIO_GRAPH_GRAPH_MANAGER_CONFIG_H_
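The struct above is plain data; below is a minimal sketch of how a caller might populate it for a hypothetical third-order binaural configuration. The configuration name and HRIR asset paths are placeholders rather than actual asset names; only the field names come from the header above.

#include <string>
#include <utility>
#include <vector>

#include "graph/graph_manager_config.h"

namespace vraudio {

// Hypothetical helper that builds a third-order configuration. The asset
// paths below are placeholders for whichever SH HRIR files the build ships.
GraphManagerConfig MakeThirdOrderConfigSketch() {
  GraphManagerConfig config;
  config.configuration_name = "example_third_order_binaural";
  config.max_ambisonic_order = 3;
  // One spherical harmonics HRIR file per supported ambisonic order:
  // (ambisonic order, filename).
  config.sh_hrir_filenames = {{1, "sh_hrir_order_1.wav"},
                              {3, "sh_hrir_order_3.wav"}};
  return config;
}

}  // namespace vraudio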
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/hoa_rotator_node.cc b/src/3rdparty/resonance-audio/resonance_audio/graph/hoa_rotator_node.cc
new file mode 100644
index 000000000..4ffae89c3
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/hoa_rotator_node.cc
@@ -0,0 +1,69 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "graph/hoa_rotator_node.h"
+
+#include "ambisonics/utils.h"
+#include "base/logging.h"
+
+
+namespace vraudio {
+
+HoaRotatorNode::HoaRotatorNode(SourceId source_id,
+ const SystemSettings& system_settings,
+ int ambisonic_order)
+ : system_settings_(system_settings),
+ hoa_rotator_(ambisonic_order),
+ output_buffer_(GetNumPeriphonicComponents(ambisonic_order),
+ system_settings.GetFramesPerBuffer()) {
+ output_buffer_.Clear();
+ output_buffer_.set_source_id(source_id);
+}
+
+const AudioBuffer* HoaRotatorNode::AudioProcess(const NodeInput& input) {
+
+
+ const AudioBuffer* input_buffer = input.GetSingleInput();
+ DCHECK(input_buffer);
+ DCHECK_GT(input_buffer->num_frames(), 0U);
+ DCHECK_GE(input_buffer->num_channels(), 4U);
+ DCHECK_EQ(input_buffer->source_id(), output_buffer_.source_id());
+
+ // Rotate soundfield buffer by the inverse head orientation.
+ const auto source_parameters =
+ system_settings_.GetSourceParameters(input_buffer->source_id());
+ if (source_parameters == nullptr) {
+ LOG(WARNING) << "Could not find source parameters";
+ return nullptr;
+ }
+
+ const WorldRotation& source_rotation =
+ source_parameters->object_transform.rotation;
+ const WorldRotation inverse_head_rotation =
+ system_settings_.GetHeadRotation().conjugate();
+ const WorldRotation rotation = inverse_head_rotation * source_rotation;
+ const bool rotation_applied =
+ hoa_rotator_.Process(rotation, *input_buffer, &output_buffer_);
+
+ if (!rotation_applied) {
+ return input_buffer;
+ }
+
+  // Return the rotated output buffer.
+ return &output_buffer_;
+}
+
+} // namespace vraudio
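As a side note on the rotation composed in AudioProcess() above, here is a minimal sketch of the same quaternion arithmetic, assuming WorldRotation behaves like an Eigen-style unit quaternion (an assumption for illustration). Rotating the soundfield by the inverse head rotation keeps sources anchored in world space while the listener's head turns.

#include <Eigen/Geometry>

// Sketch of the rotation composed in AudioProcess() above. For unit
// quaternions conjugate() equals the inverse, so this applies the source's
// own rotation and then undoes the listener's head rotation.
Eigen::Quaternionf ComposeSoundfieldRotation(
    const Eigen::Quaternionf& head_rotation,
    const Eigen::Quaternionf& source_rotation) {
  const Eigen::Quaternionf inverse_head_rotation = head_rotation.conjugate();
  return inverse_head_rotation * source_rotation;
}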
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/hoa_rotator_node.h b/src/3rdparty/resonance-audio/resonance_audio/graph/hoa_rotator_node.h
new file mode 100644
index 000000000..1ad3442be
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/hoa_rotator_node.h
@@ -0,0 +1,55 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#ifndef RESONANCE_AUDIO_GRAPH_HOA_ROTATOR_NODE_H_
+#define RESONANCE_AUDIO_GRAPH_HOA_ROTATOR_NODE_H_
+
+#include <memory>
+
+#include "ambisonics/hoa_rotator.h"
+#include "base/audio_buffer.h"
+#include "base/constants_and_types.h"
+#include "graph/system_settings.h"
+#include "node/processing_node.h"
+
+namespace vraudio {
+
+// Node that accepts a single PeriphonicSoundfieldBuffer as input and outputs
+// a rotated PeriphonicSoundfieldBuffer, using head rotation information from
+// the system settings.
+class HoaRotatorNode : public ProcessingNode {
+ public:
+ HoaRotatorNode(SourceId source_id, const SystemSettings& system_settings,
+ int ambisonic_order);
+
+ protected:
+  // Implements ProcessingNode. Returns a null pointer if the source
+  // parameters cannot be found, or the unrotated input buffer if no rotation
+  // was applied.
+ const AudioBuffer* AudioProcess(const NodeInput& input) override;
+
+ private:
+ const SystemSettings& system_settings_;
+
+ // Soundfield rotator used to rotate higher order soundfields.
+ HoaRotator hoa_rotator_;
+
+ // Output buffer.
+ AudioBuffer output_buffer_;
+};
+
+} // namespace vraudio
+
+#endif // RESONANCE_AUDIO_GRAPH_HOA_ROTATOR_NODE_H_
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/mixer_node.cc b/src/3rdparty/resonance-audio/resonance_audio/graph/mixer_node.cc
new file mode 100644
index 000000000..8bb91c767
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/mixer_node.cc
@@ -0,0 +1,57 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "graph/mixer_node.h"
+
+
+
+namespace vraudio {
+
+MixerNode::MixerNode(const SystemSettings& system_settings, size_t num_channels)
+ : num_channels_(num_channels),
+ mixer_(num_channels_, system_settings.GetFramesPerBuffer()) {
+ DCHECK_NE(num_channels_, 0U);
+ EnableProcessOnEmptyInput(true);
+}
+
+const AudioBuffer* MixerNode::GetOutputBuffer() const {
+ return mixer_.GetOutput();
+}
+
+bool MixerNode::CleanUp() {
+ CallCleanUpOnInputNodes();
+ // Prevent node from being disconnected when all sources are removed.
+ return false;
+}
+
+const AudioBuffer* MixerNode::AudioProcess(const NodeInput& input) {
+
+
+ mixer_.Reset();
+
+ const auto& input_buffers = input.GetInputBuffers();
+ if (input_buffers.empty()) {
+ return nullptr;
+ }
+
+ for (auto input_buffer : input_buffers) {
+ DCHECK_EQ(input_buffer->num_channels(), num_channels_);
+ mixer_.AddInput(*input_buffer);
+ }
+ return mixer_.GetOutput();
+}
+
+} // namespace vraudio
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/mixer_node.h b/src/3rdparty/resonance-audio/resonance_audio/graph/mixer_node.h
new file mode 100644
index 000000000..0fe88f98b
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/mixer_node.h
@@ -0,0 +1,54 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#ifndef RESONANCE_AUDIO_GRAPH_MIXER_NODE_H_
+#define RESONANCE_AUDIO_GRAPH_MIXER_NODE_H_
+
+#include "base/audio_buffer.h"
+#include "dsp/mixer.h"
+#include "graph/system_settings.h"
+#include "node/processing_node.h"
+
+namespace vraudio {
+
+// Accepts multiple input buffers and outputs a downmix to a single output
+// buffer. All input buffers must have the same number of channels and the same
+// number of frames per buffer.
+class MixerNode : public ProcessingNode {
+ public:
+ MixerNode(const SystemSettings& system_settings, size_t num_channels);
+
+ // Returns the current output buffer of the mixer.
+ //
+ // @return Output audio buffer.
+ const AudioBuffer* GetOutputBuffer() const;
+
+ // Node implementation.
+ bool CleanUp() final;
+
+ protected:
+ // Implements ProcessingNode.
+ const AudioBuffer* AudioProcess(const NodeInput& input) override;
+
+ private:
+ const size_t num_channels_;
+
+ Mixer mixer_;
+};
+
+} // namespace vraudio
+
+#endif // RESONANCE_AUDIO_GRAPH_MIXER_NODE_H_
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/mixer_node_test.cc b/src/3rdparty/resonance-audio/resonance_audio/graph/mixer_node_test.cc
new file mode 100644
index 000000000..db9e72e17
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/mixer_node_test.cc
@@ -0,0 +1,112 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "graph/mixer_node.h"
+
+#include <algorithm>
+
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "base/audio_buffer.h"
+#include "base/logging.h"
+#include "graph/system_settings.h"
+#include "node/sink_node.h"
+#include "node/source_node.h"
+
+namespace vraudio {
+
+namespace {
+
+// Helper class to detect destruction.
+class DeletionDetector {
+ public:
+ explicit DeletionDetector(bool* node_deletion_flag)
+ : node_deletion_flag_(node_deletion_flag) {}
+ ~DeletionDetector() {
+ if (node_deletion_flag_ != nullptr) {
+ *node_deletion_flag_ = true;
+ }
+ }
+
+ private:
+ bool* node_deletion_flag_;
+};
+
+// Wraps |SourceNode| to detect its deletion.
+class MySourceNode : public SourceNode, DeletionDetector {
+ public:
+ explicit MySourceNode(bool* node_deletion_flag)
+ : SourceNode(), DeletionDetector(node_deletion_flag) {}
+
+ protected:
+ const AudioBuffer* AudioProcess() final { return nullptr; }
+};
+
+// Wraps |MixerNode| to detect its deletion.
+class MyAudioMixerNode : public MixerNode, DeletionDetector {
+ public:
+ explicit MyAudioMixerNode(bool* node_deletion_flag,
+ const SystemSettings& system_settings)
+ : MixerNode(system_settings, kNumMonoChannels),
+ DeletionDetector(node_deletion_flag) {}
+};
+
+// Wraps |SinkNode| to detect its deletion.
+class MySinkNode : public SinkNode, DeletionDetector {
+ public:
+ explicit MySinkNode(bool* node_deletion_flag)
+ : SinkNode(), DeletionDetector(node_deletion_flag) {}
+};
+
+// Tests that the |MixerNode| stays connected when all of its sources are
+// removed.
+TEST(AudioNodesTest, cleanUpOnEmptyInputTest) {
+ SystemSettings system_settings_(kNumMonoChannels, 128 /* frames_per_buffer */,
+ 48000 /* sample_rate_hz */);
+
+ bool source_node_deleted = false;
+ bool mixer_node_deleted = false;
+ bool sink_node_deleted = false;
+
+ auto sink_node = std::make_shared<MySinkNode>(&sink_node_deleted);
+
+ {
+ // Create a source and mixer node and connect it to sink node.
+ auto source_node = std::make_shared<MySourceNode>(&source_node_deleted);
+ auto mixer_node = std::make_shared<MyAudioMixerNode>(&mixer_node_deleted,
+ system_settings_);
+
+ // Connect nodes.
+ sink_node->Connect(mixer_node);
+ mixer_node->Connect(source_node);
+
+    // The end of stream is marked in the source node, so no more data is expected.
+ source_node->MarkEndOfStream();
+ }
+
+ EXPECT_FALSE(source_node_deleted);
+ EXPECT_FALSE(mixer_node_deleted);
+ EXPECT_FALSE(sink_node_deleted);
+
+ sink_node->CleanUp();
+
+ EXPECT_TRUE(source_node_deleted);
+ EXPECT_FALSE(mixer_node_deleted);
+ EXPECT_FALSE(sink_node_deleted);
+}
+
+} // namespace
+
+} // namespace vraudio
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/mono_from_soundfield_node.cc b/src/3rdparty/resonance-audio/resonance_audio/graph/mono_from_soundfield_node.cc
new file mode 100644
index 000000000..0519be5ef
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/mono_from_soundfield_node.cc
@@ -0,0 +1,45 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "graph/mono_from_soundfield_node.h"
+
+#include "base/constants_and_types.h"
+
+namespace vraudio {
+
+MonoFromSoundfieldNode::MonoFromSoundfieldNode(
+ SourceId source_id, const SystemSettings& system_settings)
+ : output_buffer_(kNumMonoChannels, system_settings.GetFramesPerBuffer()) {
+ output_buffer_.set_source_id(source_id);
+ output_buffer_.Clear();
+}
+
+const AudioBuffer* MonoFromSoundfieldNode::AudioProcess(
+ const NodeInput& input) {
+
+
+ const AudioBuffer* input_buffer = input.GetSingleInput();
+ DCHECK(input_buffer);
+ DCHECK_EQ(input_buffer->source_id(), output_buffer_.source_id());
+ DCHECK_NE(input_buffer->num_channels(), 0U);
+ DCHECK_EQ(input_buffer->num_frames(), output_buffer_.num_frames());
+ // Get W channel of the ambisonic input.
+ output_buffer_[0] = (*input_buffer)[0];
+
+ return &output_buffer_;
+}
+
+} // namespace vraudio
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/mono_from_soundfield_node.h b/src/3rdparty/resonance-audio/resonance_audio/graph/mono_from_soundfield_node.h
new file mode 100644
index 000000000..6e8525e17
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/mono_from_soundfield_node.h
@@ -0,0 +1,44 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#ifndef RESONANCE_AUDIO_GRAPH_MONO_FROM_SOUNDFIELD_NODE_H_
+#define RESONANCE_AUDIO_GRAPH_MONO_FROM_SOUNDFIELD_NODE_H_
+
+#include "base/audio_buffer.h"
+#include "graph/system_settings.h"
+#include "node/processing_node.h"
+
+namespace vraudio {
+
+// Node that accepts an ambisonic buffer as input and extracts its W channel
+// onto a mono output buffer.
+class MonoFromSoundfieldNode : public ProcessingNode {
+ public:
+ MonoFromSoundfieldNode(SourceId source_id,
+ const SystemSettings& system_settings);
+
+ protected:
+ // Implements |ProcessingNode|.
+ const AudioBuffer* AudioProcess(const NodeInput& input) override;
+
+ private:
+ // Mono audio buffer to store output data.
+ AudioBuffer output_buffer_;
+};
+
+} // namespace vraudio
+
+#endif // RESONANCE_AUDIO_GRAPH_MONO_FROM_SOUNDFIELD_NODE_H_
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/near_field_effect_node.cc b/src/3rdparty/resonance-audio/resonance_audio/graph/near_field_effect_node.cc
new file mode 100644
index 000000000..2b75ae6e9
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/near_field_effect_node.cc
@@ -0,0 +1,117 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "graph/near_field_effect_node.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include "base/logging.h"
+#include "base/spherical_angle.h"
+
+#include "dsp/distance_attenuation.h"
+#include "dsp/gain.h"
+#include "dsp/stereo_panner.h"
+
+namespace vraudio {
+
+NearFieldEffectNode::NearFieldEffectNode(SourceId source_id,
+ const SystemSettings& system_settings)
+ : pan_gains_({0.0f, 0.0f}),
+ near_field_processor_(system_settings.GetSampleRateHz(),
+ system_settings.GetFramesPerBuffer()),
+ system_settings_(system_settings),
+ output_buffer_(kNumStereoChannels, system_settings.GetFramesPerBuffer()) {
+ output_buffer_.set_source_id(source_id);
+}
+
+const AudioBuffer* NearFieldEffectNode::AudioProcess(const NodeInput& input) {
+
+
+ const AudioBuffer* input_buffer = input.GetSingleInput();
+ DCHECK(input_buffer);
+ DCHECK_EQ(input_buffer->num_channels(), 1U);
+ DCHECK_EQ(input_buffer->source_id(), output_buffer_.source_id());
+
+ const auto source_parameters =
+ system_settings_.GetSourceParameters(input_buffer->source_id());
+ if (source_parameters == nullptr) {
+ LOG(WARNING) << "Could not find source parameters";
+ return nullptr;
+ }
+
+
+ DCHECK_EQ(pan_gains_.size(), kNumStereoChannels);
+ const float near_field_gain = source_parameters->near_field_gain;
+ if (near_field_gain > 0.0f) {
+ const auto& listener_position = system_settings_.GetHeadPosition();
+ const auto& listener_rotation = system_settings_.GetHeadRotation();
+ const auto& source_transform = source_parameters->object_transform;
+ // Compute the relative source direction in spherical angles to calculate
+ // the left and right panner gains.
+ WorldPosition relative_direction;
+ GetRelativeDirection(listener_position, listener_rotation,
+ source_transform.position, &relative_direction);
+ const auto source_direction =
+ SphericalAngle::FromWorldPosition(relative_direction);
+ CalculateStereoPanGains(source_direction, &pan_gains_);
+ // Combine pan gains with per-source near field gain.
+ const float total_near_field_gain =
+ ComputeNearFieldEffectGain(listener_position,
+ source_transform.position) *
+ near_field_gain / kMaxNearFieldEffectGain;
+ for (size_t i = 0; i < pan_gains_.size(); ++i) {
+ pan_gains_[i] *= total_near_field_gain;
+ }
+ } else {
+ // Disable near field effect if |near_field_gain| is zero.
+ std::fill(pan_gains_.begin(), pan_gains_.end(), 0.0f);
+ }
+
+ const float left_current_gain = left_panner_.GetGain();
+ const float right_current_gain = right_panner_.GetGain();
+ const float left_target_gain = pan_gains_[0];
+ const float right_target_gain = pan_gains_[1];
+ const bool is_left_zero_gain =
+ IsGainNearZero(left_current_gain) && IsGainNearZero(left_target_gain);
+ const bool is_right_zero_gain =
+ IsGainNearZero(right_current_gain) && IsGainNearZero(right_target_gain);
+
+ if (is_left_zero_gain && is_right_zero_gain) {
+ // Make sure gain processors are initialized.
+ left_panner_.Reset(0.0f);
+ right_panner_.Reset(0.0f);
+ // Both channels go to zero, there is no need for further processing.
+ return nullptr;
+ }
+
+ const auto& input_channel = (*input_buffer)[0];
+ auto* left_output_channel = &output_buffer_[0];
+ auto* right_output_channel = &output_buffer_[1];
+ // Apply bass boost and delay compensation (if necessary) to the input signal
+ // and place it temporarily in the right output channel. This way we avoid
+ // allocating a temporary buffer.
+ near_field_processor_.Process(input_channel, right_output_channel,
+ source_parameters->enable_hrtf);
+ left_panner_.ApplyGain(left_target_gain, *right_output_channel,
+ left_output_channel, /*accumulate_output=*/false);
+ right_panner_.ApplyGain(right_target_gain, *right_output_channel,
+ right_output_channel, /*accumulate_output=*/false);
+
+ return &output_buffer_;
+}
+
+} // namespace vraudio
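A minimal sketch of the gain combination performed in AudioProcess() above, with hypothetical names standing in for the |dsp| helpers the node actually calls; only the arithmetic mirrors the code. The distance-based near field boost is scaled by the per-source gain (normalized by kMaxNearFieldEffectGain) and folded into the stereo pan gains.

#include <array>

// |distance_based_gain| stands in for ComputeNearFieldEffectGain() and
// |pan_gains| for the output of CalculateStereoPanGains(); the result mirrors
// |total_near_field_gain| applied to both channels above.
std::array<float, 2> CombineNearFieldGains(std::array<float, 2> pan_gains,
                                           float distance_based_gain,
                                           float source_near_field_gain,
                                           float max_near_field_gain) {
  const float total_gain =
      distance_based_gain * source_near_field_gain / max_near_field_gain;
  for (float& gain : pan_gains) {
    gain *= total_gain;
  }
  return pan_gains;
}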
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/near_field_effect_node.h b/src/3rdparty/resonance-audio/resonance_audio/graph/near_field_effect_node.h
new file mode 100644
index 000000000..83b8c6d69
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/near_field_effect_node.h
@@ -0,0 +1,69 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#ifndef RESONANCE_AUDIO_GRAPH_NEAR_FIELD_EFFECT_NODE_H_
+#define RESONANCE_AUDIO_GRAPH_NEAR_FIELD_EFFECT_NODE_H_
+
+#include <vector>
+
+#include "base/audio_buffer.h"
+#include "base/constants_and_types.h"
+#include "dsp/gain_processor.h"
+#include "dsp/near_field_processor.h"
+#include "graph/system_settings.h"
+#include "node/processing_node.h"
+
+namespace vraudio {
+
+// Node that accepts a single mono audio buffer as input, applies an
+// approximate near field effect and outputs a processed stereo audio buffer.
+// The stereo output buffer can then be combined with a binaural output in
+// order to simulate a sound source that is close (<1m) to the listener's head.
+class NearFieldEffectNode : public ProcessingNode {
+ public:
+ // Constructor.
+ //
+ // @param source_id Output buffer source id.
+ // @param system_settings Global system settings.
+ NearFieldEffectNode(SourceId source_id,
+ const SystemSettings& system_settings);
+
+ protected:
+ // Implements ProcessingNode.
+ const AudioBuffer* AudioProcess(const NodeInput& input) override;
+
+ private:
+ // Left and right processors apply both near field gain and panner gains.
+ GainProcessor left_panner_;
+ GainProcessor right_panner_;
+
+ // Left and right gains for near field and panning combined.
+ std::vector<float> pan_gains_;
+
+ // Near field processor used to apply approximate near field effect to the
+ // mono source signal.
+ NearFieldProcessor near_field_processor_;
+
+ // Used to obtain head rotation.
+ const SystemSettings& system_settings_;
+
+ // Output buffer.
+ AudioBuffer output_buffer_;
+};
+
+} // namespace vraudio
+
+#endif // RESONANCE_AUDIO_GRAPH_NEAR_FIELD_EFFECT_NODE_H_
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/near_field_effect_node_test.cc b/src/3rdparty/resonance-audio/resonance_audio/graph/near_field_effect_node_test.cc
new file mode 100644
index 000000000..1117ccd21
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/near_field_effect_node_test.cc
@@ -0,0 +1,127 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "graph/near_field_effect_node.h"
+
+#include <memory>
+#include <vector>
+
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "base/constants_and_types.h"
+#include "dsp/distance_attenuation.h"
+#include "dsp/stereo_panner.h"
+#include "graph/buffered_source_node.h"
+#include "node/sink_node.h"
+#include "node/source_node.h"
+#include "utils/test_util.h"
+
+namespace vraudio {
+
+namespace {
+
+// Source Id for use with |BufferedSourceNode|.
+const SourceId kSourceId = 0;
+
+// Number of frames per buffer.
+const size_t kFramesPerBuffer = kUnitRampLength;
+
+// Sampling rate.
+const int kSampleRate = 48000;
+
+// Source distances.
+const size_t kNumDistances = 5;
+const float kDistances[kNumDistances] = {0.0f, 0.25f, 0.5f, 0.75f, 10.0f};
+
+// The maximum expected gain change determines how many buffers we need to
+// wait for the output sample values to settle.
+const size_t kMaxExpectedGainChange = 9;
+
+// Expected offset due to near field processor delay compensation at 48kHz.
+const size_t kExpectedDelay = 31;
+
+// Expected Dirac pulse attenuation due to shelf-filtering at 48kHz.
+const float KExpectedPeakReduction = 0.87319273f;
+
+} // namespace
+
+TEST(NearFieldEffectNodeTest, VariousDistanceTest) {
+ const size_t kDiracOffset = kFramesPerBuffer / 2;
+ // We add one extra buffer since the Dirac is in the middle of the buffer.
+ const size_t kBuffersToSettle = kMaxExpectedGainChange + 1;
+ const auto kIdentityRotation = WorldRotation();
+ SystemSettings system_settings(kNumStereoChannels, kFramesPerBuffer,
+ kSampleRate);
+
+ // Create the simple audio graph.
+ auto near_field_effect_node =
+ std::make_shared<NearFieldEffectNode>(kSourceId, system_settings);
+ auto input_node = std::make_shared<BufferedSourceNode>(
+ kSourceId, kNumMonoChannels, kFramesPerBuffer);
+ auto output_node = std::make_shared<SinkNode>();
+ near_field_effect_node->Connect(input_node);
+ output_node->Connect(near_field_effect_node);
+ auto parameters_manager = system_settings.GetSourceParametersManager();
+ parameters_manager->Register(kSourceId);
+
+ const AudioBuffer* output_buffer;
+ for (size_t i = 0; i < kNumDistances; ++i) {
+ const WorldPosition input_position(kDistances[i], 0.0f, 0.0f);
+    // Loop until the gain processors have reached steady state.
+ for (size_t settle = 0; settle < kBuffersToSettle; ++settle) {
+ AudioBuffer* const input_node_buffer =
+ input_node->GetMutableAudioBufferAndSetNewBufferFlag();
+ GenerateDiracImpulseFilter(kDiracOffset, &(*input_node_buffer)[0]);
+
+ auto source_parameters =
+ parameters_manager->GetMutableParameters(kSourceId);
+ source_parameters->object_transform.position = input_position;
+ source_parameters->near_field_gain = kMaxNearFieldEffectGain;
+ // Retrieve the output.
+ const auto& buffer_vector = output_node->ReadInputs();
+ if (!buffer_vector.empty()) {
+ EXPECT_EQ(buffer_vector.size(), 1U);
+ output_buffer = buffer_vector.front();
+ } else {
+ output_buffer = nullptr;
+ }
+ }
+
+ std::vector<float> stereo_pan_gains(kNumStereoChannels, 0.0f);
+    // These methods are tested elsewhere. Their output is used to determine
+    // whether the output from the |NearFieldEffectNode| is correct.
+ WorldPosition relative_direction;
+ GetRelativeDirection(system_settings.GetHeadPosition(), kIdentityRotation,
+ input_position, &relative_direction);
+ const SphericalAngle source_direction =
+ SphericalAngle::FromWorldPosition(relative_direction);
+ CalculateStereoPanGains(source_direction, &stereo_pan_gains);
+ const float near_field_gain = ComputeNearFieldEffectGain(
+ system_settings.GetHeadPosition(), input_position);
+ if (i < kNumDistances - 1) {
+ EXPECT_FALSE(output_buffer == nullptr);
+ EXPECT_NEAR(
+ near_field_gain * stereo_pan_gains[0] * KExpectedPeakReduction,
+ (*output_buffer)[0][kDiracOffset + kExpectedDelay], kEpsilonFloat);
+ EXPECT_NEAR(
+ near_field_gain * stereo_pan_gains[1] * KExpectedPeakReduction,
+ (*output_buffer)[1][kDiracOffset + kExpectedDelay], kEpsilonFloat);
+ } else {
+ EXPECT_TRUE(output_buffer == nullptr);
+ }
+ }
+}
+
+} // namespace vraudio
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/occlusion_node.cc b/src/3rdparty/resonance-audio/resonance_audio/graph/occlusion_node.cc
new file mode 100644
index 000000000..3805836f5
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/occlusion_node.cc
@@ -0,0 +1,101 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "graph/occlusion_node.h"
+
+#include <cmath>
+
+#include "base/logging.h"
+#include "base/spherical_angle.h"
+
+#include "dsp/occlusion_calculator.h"
+
+namespace vraudio {
+
+namespace {
+
+// Low pass filter coefficient for smoothing the applied occlusion. This avoids
+// sudden unrealistic changes in the volume of a sound object. Range [0, 1].
+// The value below has been calculated empirically.
+const float kOcclusionSmoothingCoefficient = 0.75f;
+
+// This function provides first-order low-pass filtering. It is used to smooth
+// the occlusion parameter.
+float Interpolate(float coefficient, float previous_value, float target_value) {
+ return target_value + coefficient * (previous_value - target_value);
+}
+
+} // namespace
+
+OcclusionNode::OcclusionNode(SourceId source_id,
+ const SystemSettings& system_settings)
+ : system_settings_(system_settings),
+ low_pass_filter_(0.0f),
+ current_occlusion_(0.0f),
+ output_buffer_(kNumMonoChannels, system_settings.GetFramesPerBuffer()) {
+ output_buffer_.Clear();
+ output_buffer_.set_source_id(source_id);
+}
+
+const AudioBuffer* OcclusionNode::AudioProcess(const NodeInput& input) {
+
+ const AudioBuffer* input_buffer = input.GetSingleInput();
+ DCHECK(input_buffer);
+ DCHECK_EQ(input_buffer->source_id(), output_buffer_.source_id());
+
+ const auto source_parameters =
+ system_settings_.GetSourceParameters(input_buffer->source_id());
+ if (source_parameters == nullptr) {
+ LOG(WARNING) << "Could not find source parameters";
+ return nullptr;
+ }
+
+ const WorldPosition& listener_position = system_settings_.GetHeadPosition();
+ const WorldRotation& listener_rotation = system_settings_.GetHeadRotation();
+ const ObjectTransform& source_transform = source_parameters->object_transform;
+ // Compute the relative listener/source direction in spherical angles.
+ WorldPosition relative_direction;
+ GetRelativeDirection(listener_position, listener_rotation,
+ source_transform.position, &relative_direction);
+ const SphericalAngle listener_direction =
+ SphericalAngle::FromWorldPosition(relative_direction);
+
+ GetRelativeDirection(source_transform.position, source_transform.rotation,
+ listener_position, &relative_direction);
+ const SphericalAngle source_direction =
+ SphericalAngle::FromWorldPosition(relative_direction);
+ // Calculate low-pass filter coefficient based on listener/source directivity
+ // and occlusion values.
+ const float listener_directivity = CalculateDirectivity(
+ source_parameters->listener_directivity_alpha,
+ source_parameters->listener_directivity_order, listener_direction);
+ const float source_directivity = CalculateDirectivity(
+ source_parameters->directivity_alpha,
+ source_parameters->directivity_order, source_direction);
+ current_occlusion_ =
+ Interpolate(kOcclusionSmoothingCoefficient, current_occlusion_,
+ source_parameters->occlusion_intensity);
+ const float filter_coefficient = CalculateOcclusionFilterCoefficient(
+ listener_directivity * source_directivity, current_occlusion_);
+ low_pass_filter_.SetCoefficient(filter_coefficient);
+ if (!low_pass_filter_.Filter((*input_buffer)[0], &output_buffer_[0])) {
+ return input_buffer;
+ }
+  // Return the low-pass filtered output buffer.
+ return &output_buffer_;
+}
+
+} // namespace vraudio
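The Interpolate() helper above is a one-pole smoother evaluated once per processed buffer. A small stand-alone sketch of how the applied occlusion converges towards a new target: with the 0.75 coefficient, a step from 0 to 1 covers roughly 90% of the distance after eight buffers, since 0.75^8 is about 0.1.

// Same arithmetic as Interpolate() above, shown as a stand-alone sketch.
float SmoothTowards(float coefficient, float previous_value,
                    float target_value) {
  return target_value + coefficient * (previous_value - target_value);
}

// Example: one call per processed buffer.
// float occlusion = 0.0f;
// for (int buffer = 0; buffer < 8; ++buffer) {
//   occlusion = SmoothTowards(0.75f, occlusion, 1.0f);
// }
// After eight buffers occlusion is ~0.9; it approaches 1.0 asymptotically.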
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/occlusion_node.h b/src/3rdparty/resonance-audio/resonance_audio/graph/occlusion_node.h
new file mode 100644
index 000000000..316788751
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/occlusion_node.h
@@ -0,0 +1,59 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#ifndef RESONANCE_AUDIO_GRAPH_OCCLUSION_NODE_H_
+#define RESONANCE_AUDIO_GRAPH_OCCLUSION_NODE_H_
+
+#include "base/audio_buffer.h"
+#include "dsp/mono_pole_filter.h"
+#include "graph/system_settings.h"
+#include "node/processing_node.h"
+
+namespace vraudio {
+
+// Node that accepts a single audio buffer as input and outputs a low-pass
+// filtered version of it, with the cutoff frequency determined by
+// listener/source directivity and occlusion intensity.
+class OcclusionNode : public ProcessingNode {
+ public:
+ // Constructor.
+ //
+ // @param source_id Output buffer source id.
+ // @param system_settings Global system settings.
+ OcclusionNode(SourceId source_id, const SystemSettings& system_settings);
+
+ protected:
+ // Implements ProcessingNode.
+ const AudioBuffer* AudioProcess(const NodeInput& input) override;
+
+ private:
+ friend class OcclusionNodeTest;
+
+ const SystemSettings& system_settings_;
+
+ // Used to low-pass input audio when a source is occluded or self-occluded.
+ MonoPoleFilter low_pass_filter_;
+
+ // Occlusion intensity value for the current input buffer.
+ float current_occlusion_;
+
+ // Output buffer.
+ AudioBuffer output_buffer_;
+};
+
+} // namespace vraudio
+
+#endif // RESONANCE_AUDIO_GRAPH_OCCLUSION_NODE_H_
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/occlusion_node_test.cc b/src/3rdparty/resonance-audio/resonance_audio/graph/occlusion_node_test.cc
new file mode 100644
index 000000000..3dac808c3
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/occlusion_node_test.cc
@@ -0,0 +1,186 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "graph/occlusion_node.h"
+
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
+#include "base/constants_and_types.h"
+#include "utils/test_util.h"
+
+namespace vraudio {
+
+namespace {
+
+// Number of frames per buffer.
+const size_t kFramesPerBuffer = 256;
+
+// Sampling rate.
+const int kSampleRate = 48000;
+
+// Generated sawtooth length for test buffers.
+const size_t kSawtoothLength = 16;
+
+// Source id.
+const SourceId kSourceId = 1;
+
+} // namespace
+
+class OcclusionNodeTest : public ::testing::Test {
+ protected:
+ OcclusionNodeTest()
+ : system_settings_(kNumStereoChannels, kFramesPerBuffer, kSampleRate) {}
+
+ void SetUp() override {
+ system_settings_.GetSourceParametersManager()->Register(kSourceId);
+ }
+
+  // Wraps the given buffer in a vector for processing and returns the output
+  // of the occlusion node's AudioProcess() method.
+ const AudioBuffer* GetProcessedData(const AudioBuffer* input_buffer,
+ OcclusionNode* occlusion_node) {
+ std::vector<const AudioBuffer*> input_buffers;
+ input_buffers.push_back(input_buffer);
+ return occlusion_node->AudioProcess(
+ ProcessingNode::NodeInput(input_buffers));
+ }
+
+ // Returns a pointer to the parameters of the source.
+ SourceParameters* GetParameters() {
+ return system_settings_.GetSourceParametersManager()->GetMutableParameters(
+ kSourceId);
+ }
+
+ // System settings.
+ SystemSettings system_settings_;
+};
+
+// Tests that an input buffer is left unaffected when both occlusion and
+// self-occlusion are absent.
+TEST_F(OcclusionNodeTest, NoOcclusionTest) {
+ OcclusionNode occlusion_processor(kSourceId, system_settings_);
+
+ AudioBuffer input(1, kFramesPerBuffer);
+ input.set_source_id(kSourceId);
+ GenerateSawToothSignal(kSawtoothLength, &input[0]);
+
+ const AudioBuffer* output = GetProcessedData(&input, &occlusion_processor);
+ const bool buffers_identical =
+ CompareAudioBuffers((*output)[0], input[0], kEpsilonFloat);
+ EXPECT_TRUE(buffers_identical);
+}
+
+// Tests that a more heavily occluded source produces lower output energy.
+TEST_F(OcclusionNodeTest, OcclusionTest) {
+ OcclusionNode occlusion_processor_1(kSourceId, system_settings_);
+ OcclusionNode occlusion_processor_2(kSourceId, system_settings_);
+
+ AudioBuffer input(kNumMonoChannels, kFramesPerBuffer);
+ GenerateSawToothSignal(kSawtoothLength, &input[0]);
+ AudioBuffer input_1;
+ input_1 = input;
+ input_1.set_source_id(kSourceId);
+ AudioBuffer input_2;
+ input_2 = input;
+ input_2.set_source_id(kSourceId);
+
+ SourceParameters* parameters = GetParameters();
+
+ parameters->occlusion_intensity = 0.5f;
+ const AudioBuffer* output_1 =
+ GetProcessedData(&input_1, &occlusion_processor_1);
+ parameters->occlusion_intensity = 1.0f;
+ const AudioBuffer* output_2 =
+ GetProcessedData(&input_2, &occlusion_processor_2);
+ const double output_1_energy = CalculateSignalRms((*output_1)[0]);
+ const double output_2_energy = CalculateSignalRms((*output_2)[0]);
+ const double input_energy = CalculateSignalRms(input[0]);
+
+ EXPECT_LT(output_1_energy, input_energy);
+ EXPECT_LT(output_2_energy, output_1_energy);
+}
+
+// Tests that a non-omnidirectional listener directivity pattern produces
+// lower output energy when the listener is oriented away from the source.
+TEST_F(OcclusionNodeTest, ListenerDirectivityTest) {
+ OcclusionNode occlusion_processor(kSourceId, system_settings_);
+
+ AudioBuffer input(kNumMonoChannels, kFramesPerBuffer);
+ input.set_source_id(kSourceId);
+ GenerateSawToothSignal(kSawtoothLength, &input[0]);
+
+ // Set a hyper-cardioid shaped listener directivity for the source input.
+ SourceParameters* parameters = GetParameters();
+ parameters->listener_directivity_alpha = 0.5f;
+ parameters->listener_directivity_order = 2.0f;
+  // Set the listener position one meter from the origin along the Z axis.
+  // This is required for the listener directivity properties to take effect.
+ system_settings_.SetHeadPosition(WorldPosition(0.0f, 0.0f, 1.0f));
+
+ const double input_energy = CalculateSignalRms(input[0]);
+ // Process input with identity listener orientation.
+ const AudioBuffer* output_default =
+ GetProcessedData(&input, &occlusion_processor);
+ const double output_default_energy = CalculateSignalRms((*output_default)[0]);
+ // Process input with 90 degrees rotated listener orientation about Y axis.
+ system_settings_.SetHeadRotation(
+ WorldRotation(0.0f, kInverseSqrtTwo, 0.0f, kInverseSqrtTwo));
+ const AudioBuffer* output_rotated =
+ GetProcessedData(&input, &occlusion_processor);
+ const double output_rotated_energy = CalculateSignalRms((*output_rotated)[0]);
+
+ // Test if the output energy is lower when the listener was rotated away from
+ // the source.
+ EXPECT_NEAR(output_default_energy, input_energy, kEpsilonFloat);
+ EXPECT_LT(output_rotated_energy, output_default_energy);
+}
+
+// Tests that a more heavily self-occluded source produces lower output
+// energy.
+TEST_F(OcclusionNodeTest, SourceDirectivityTest) {
+ OcclusionNode occlusion_processor_1(kSourceId, system_settings_);
+ OcclusionNode occlusion_processor_2(kSourceId, system_settings_);
+
+ AudioBuffer input(kNumMonoChannels, kFramesPerBuffer);
+ input.set_source_id(kSourceId);
+ GenerateSawToothSignal(kSawtoothLength, &input[0]);
+ AudioBuffer input_1;
+ input_1 = input;
+ AudioBuffer input_2;
+ input_2 = input;
+
+ SourceParameters* parameters = GetParameters();
+
+ parameters->directivity_alpha = 0.25f;
+ parameters->directivity_order = 2.0f;
+ const AudioBuffer* output_1 =
+ GetProcessedData(&input_1, &occlusion_processor_1);
+
+ parameters->directivity_order = 4.0f;
+ parameters->directivity_alpha = 0.25f;
+ const AudioBuffer* output_2 =
+ GetProcessedData(&input_2, &occlusion_processor_2);
+
+ const double output_1_energy = CalculateSignalRms((*output_1)[0]);
+ const double output_2_energy = CalculateSignalRms((*output_2)[0]);
+ const double input_energy = CalculateSignalRms(input[0]);
+
+ EXPECT_LT(output_1_energy, input_energy);
+ EXPECT_LT(output_2_energy, output_1_energy);
+}
+
+} // namespace vraudio
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/reflections_node.cc b/src/3rdparty/resonance-audio/resonance_audio/graph/reflections_node.cc
new file mode 100644
index 000000000..1223fe7e8
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/reflections_node.cc
@@ -0,0 +1,113 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "graph/reflections_node.h"
+
+#include "base/constants_and_types.h"
+#include "base/logging.h"
+#include "base/misc_math.h"
+
+
+namespace vraudio {
+
+ReflectionsNode::ReflectionsNode(const SystemSettings& system_settings)
+ : system_settings_(system_settings),
+ reflections_processor_(system_settings_.GetSampleRateHz(),
+ system_settings_.GetFramesPerBuffer()),
+ num_frames_processed_on_empty_input_(
+ system_settings_.GetFramesPerBuffer()),
+ output_buffer_(kNumFirstOrderAmbisonicChannels,
+ system_settings_.GetFramesPerBuffer()),
+ silence_mono_buffer_(kNumMonoChannels,
+ system_settings_.GetFramesPerBuffer()) {
+ silence_mono_buffer_.Clear();
+ EnableProcessOnEmptyInput(true);
+}
+
+void ReflectionsNode::Update() {
+
+ const auto& current_reflection_properties = reflection_properties_;
+ const auto& new_reflection_properties =
+ system_settings_.GetReflectionProperties();
+ const bool room_position_changed =
+ !EqualSafe(std::begin(current_reflection_properties.room_position),
+ std::end(current_reflection_properties.room_position),
+ std::begin(new_reflection_properties.room_position),
+ std::end(new_reflection_properties.room_position));
+ const bool room_rotation_changed =
+ !EqualSafe(std::begin(current_reflection_properties.room_rotation),
+ std::end(current_reflection_properties.room_rotation),
+ std::begin(new_reflection_properties.room_rotation),
+ std::end(new_reflection_properties.room_rotation));
+ const bool room_dimensions_changed =
+ !EqualSafe(std::begin(current_reflection_properties.room_dimensions),
+ std::end(current_reflection_properties.room_dimensions),
+ std::begin(new_reflection_properties.room_dimensions),
+ std::end(new_reflection_properties.room_dimensions));
+ const bool cutoff_frequency_changed =
+ current_reflection_properties.cutoff_frequency !=
+ new_reflection_properties.cutoff_frequency;
+ const bool coefficients_changed =
+ !EqualSafe(std::begin(current_reflection_properties.coefficients),
+ std::end(current_reflection_properties.coefficients),
+ std::begin(new_reflection_properties.coefficients),
+ std::end(new_reflection_properties.coefficients));
+ const auto& current_listener_position = listener_position_;
+ const auto& new_listener_position = system_settings_.GetHeadPosition();
+ const bool listener_position_changed =
+ current_listener_position != new_listener_position;
+ if (room_position_changed || room_rotation_changed ||
+ room_dimensions_changed || cutoff_frequency_changed ||
+ coefficients_changed || listener_position_changed) {
+ // Update reflections processor if necessary.
+ reflection_properties_ = new_reflection_properties;
+ listener_position_ = new_listener_position;
+ reflections_processor_.Update(reflection_properties_, listener_position_);
+ }
+}
+
+const AudioBuffer* ReflectionsNode::AudioProcess(const NodeInput& input) {
+
+ const AudioBuffer* input_buffer = input.GetSingleInput();
+ const size_t num_frames = system_settings_.GetFramesPerBuffer();
+ if (input_buffer == nullptr) {
+ // If we have no input, generate a silent input buffer until the node states
+ // are cleared.
+ if (num_frames_processed_on_empty_input_ <
+ reflections_processor_.num_frames_to_process_on_empty_input()) {
+ num_frames_processed_on_empty_input_ += num_frames;
+ input_buffer = &silence_mono_buffer_;
+ } else {
+ // Skip processing entirely when the states are fully cleared.
+ return nullptr;
+ }
+ } else {
+ num_frames_processed_on_empty_input_ = 0;
+ DCHECK_EQ(input_buffer->num_channels(), kNumMonoChannels);
+ }
+ output_buffer_.Clear();
+ reflections_processor_.Process(*input_buffer, &output_buffer_);
+
+ // Rotate the reflections with respect to listener's orientation.
+ const WorldRotation inverse_head_rotation =
+ system_settings_.GetHeadRotation().conjugate();
+ foa_rotator_.Process(inverse_head_rotation, output_buffer_, &output_buffer_);
+
+  // Return the rotated reflections buffer.
+ return &output_buffer_;
+}
+
+} // namespace vraudio
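A minimal sketch of the tail-flush logic in AudioProcess() above, with illustrative names only: once the input stream stops, the node keeps feeding silence until the processor reports that its internal reflection tail has been fully rendered, and only then returns nullptr so downstream nodes can skip work.

#include <cstddef>

// Returns the buffer to process next, or nullptr once the tail has decayed.
// |frames_processed_on_empty_input| mirrors the member variable above;
// |tail_length_frames| stands in for num_frames_to_process_on_empty_input().
const float* NextReflectionsInput(
    const float* input, const float* silence, std::size_t frames_per_buffer,
    std::size_t tail_length_frames,
    std::size_t* frames_processed_on_empty_input) {
  if (input != nullptr) {
    *frames_processed_on_empty_input = 0;
    return input;
  }
  if (*frames_processed_on_empty_input < tail_length_frames) {
    *frames_processed_on_empty_input += frames_per_buffer;
    return silence;  // Keep flushing the remaining reflection tail.
  }
  return nullptr;  // Tail fully rendered; skip processing.
}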
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/reflections_node.h b/src/3rdparty/resonance-audio/resonance_audio/graph/reflections_node.h
new file mode 100644
index 000000000..8685759b9
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/reflections_node.h
@@ -0,0 +1,78 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#ifndef RESONANCE_AUDIO_GRAPH_REFLECTIONS_NODE_H_
+#define RESONANCE_AUDIO_GRAPH_REFLECTIONS_NODE_H_
+
+#include <vector>
+
+#include "ambisonics/foa_rotator.h"
+#include "api/resonance_audio_api.h"
+#include "base/audio_buffer.h"
+#include "base/misc_math.h"
+#include "dsp/reflections_processor.h"
+#include "graph/system_settings.h"
+#include "node/processing_node.h"
+
+namespace vraudio {
+
+// Node that accepts a single mono buffer as input and outputs an ambisonically
+// encoded sound field buffer of the mix of all the early room reflections.
+class ReflectionsNode : public ProcessingNode {
+ public:
+ // Initializes |ReflectionsNode| class.
+ //
+ // @param system_settings Global system configuration.
+ explicit ReflectionsNode(const SystemSettings& system_settings);
+
+  // Updates the reflections. Depending on whether RT60s are used for the
+  // reverb (per the global system settings), the reflections are calculated
+  // from either the current room properties or the proxy room properties.
+ void Update();
+
+ protected:
+ // Implements ProcessingNode.
+ const AudioBuffer* AudioProcess(const NodeInput& input) override;
+
+ private:
+ const SystemSettings& system_settings_;
+
+ // First-order-ambisonics rotator to be used to rotate the reflections with
+ // respect to the listener's orientation.
+ FoaRotator foa_rotator_;
+
+ // Processes and encodes reflections into an ambisonic buffer.
+ ReflectionsProcessor reflections_processor_;
+
+ // Most recently updated reflection properties.
+ ReflectionProperties reflection_properties_;
+
+ // Most recently updated listener position.
+ WorldPosition listener_position_;
+
+ size_t num_frames_processed_on_empty_input_;
+
+ // Ambisonic output buffer.
+ AudioBuffer output_buffer_;
+
+  // Silent mono buffer used to render reflection tails when no input buffers
+  // are available.
+ AudioBuffer silence_mono_buffer_;
+};
+
+} // namespace vraudio
+
+#endif // RESONANCE_AUDIO_GRAPH_REFLECTIONS_NODE_H_
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/resonance_audio_api_impl.cc b/src/3rdparty/resonance-audio/resonance_audio/graph/resonance_audio_api_impl.cc
new file mode 100644
index 000000000..f8cc6731b
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/resonance_audio_api_impl.cc
@@ -0,0 +1,586 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "graph/resonance_audio_api_impl.h"
+
+#include <algorithm>
+#include <numeric>
+
+#include "ambisonics/utils.h"
+#include "base/constants_and_types.h"
+#include "base/logging.h"
+#include "base/misc_math.h"
+#include "base/source_parameters.h"
+
+#include "base/unique_ptr_wrapper.h"
+#include "config/source_config.h"
+#include "dsp/channel_converter.h"
+#include "dsp/distance_attenuation.h"
+#include "graph/source_parameters_manager.h"
+#include "utils/planar_interleaved_conversion.h"
+#include "utils/sample_type_conversion.h"
+
+namespace vraudio {
+
+namespace {
+
+// Supports up to 50 setter calls for each of 512 sources.
+const size_t kMaxNumTasksOnTaskQueue = 50 * 512;
+
+// User warning/notification messages.
+static const char* kBadInputPointerMessage = "Ignoring nullptr buffer";
+static const char* kBufferSizeMustMatchNumFramesMessage =
+ "Number of frames must match the frames per buffer specified during "
+ "construction - ignoring buffer";
+
+// Helper method to fetch |SourceGraphConfig| from |RenderingMode|.
+SourceGraphConfig GetSourceGraphConfigFromRenderingMode(
+ RenderingMode rendering_mode) {
+ switch (rendering_mode) {
+ case RenderingMode::kStereoPanning:
+ return StereoPanningConfig();
+ case RenderingMode::kBinauralLowQuality:
+ return BinauralLowQualityConfig();
+ case RenderingMode::kBinauralMediumQuality:
+ return BinauralMediumQualityConfig();
+ case RenderingMode::kBinauralHighQuality:
+ return BinauralHighQualityConfig();
+ case RenderingMode::kRoomEffectsOnly:
+ return RoomEffectsOnlyConfig();
+ default:
+ LOG(FATAL) << "Unknown rendering mode";
+ break;
+ }
+ return BinauralHighQualityConfig();
+}
+
+} // namespace
+
+ResonanceAudioApiImpl::ResonanceAudioApiImpl(size_t num_channels,
+ size_t frames_per_buffer,
+ int sample_rate_hz)
+ : system_settings_(num_channels, frames_per_buffer, sample_rate_hz),
+ task_queue_(kMaxNumTasksOnTaskQueue),
+ source_id_counter_(0) {
+ if (num_channels != kNumStereoChannels) {
+ LOG(FATAL) << "Only stereo output is supported";
+ return;
+ }
+
+ if (frames_per_buffer > kMaxSupportedNumFrames) {
+ LOG(FATAL) << "Only frame lengths up to " << kMaxSupportedNumFrames
+ << " are supported.";
+ return;
+ }
+
+ // The pffft library requires a minimum buffer size of 32 samples.
+ if (frames_per_buffer < FftManager::kMinFftSize) {
+ LOG(FATAL) << "The minimum number of frames per buffer is "
+ << FftManager::kMinFftSize << " samples";
+ return;
+ }
+ graph_manager_.reset(new GraphManager(system_settings_));
+}
+
+ResonanceAudioApiImpl::~ResonanceAudioApiImpl() {
+ // Clear task queue before shutting down.
+ task_queue_.Execute();
+}
+
+bool ResonanceAudioApiImpl::FillInterleavedOutputBuffer(size_t num_channels,
+ size_t num_frames,
+ float* buffer_ptr) {
+ DCHECK(buffer_ptr);
+ return FillOutputBuffer<float*>(num_channels, num_frames, buffer_ptr);
+}
+
+bool ResonanceAudioApiImpl::FillInterleavedOutputBuffer(size_t num_channels,
+ size_t num_frames,
+ int16* buffer_ptr) {
+ DCHECK(buffer_ptr);
+ return FillOutputBuffer<int16*>(num_channels, num_frames, buffer_ptr);
+}
+
+bool ResonanceAudioApiImpl::FillPlanarOutputBuffer(size_t num_channels,
+ size_t num_frames,
+ float* const* buffer_ptr) {
+ DCHECK(buffer_ptr);
+ return FillOutputBuffer<float* const*>(num_channels, num_frames, buffer_ptr);
+}
+
+bool ResonanceAudioApiImpl::FillPlanarOutputBuffer(size_t num_channels,
+ size_t num_frames,
+ int16* const* buffer_ptr) {
+ DCHECK(buffer_ptr);
+ return FillOutputBuffer<int16* const*>(num_channels, num_frames, buffer_ptr);
+}
+
+void ResonanceAudioApiImpl::SetHeadPosition(float x, float y, float z) {
+ auto task = [this, x, y, z]() {
+ const WorldPosition head_position(x, y, z);
+ system_settings_.SetHeadPosition(head_position);
+ };
+ task_queue_.Post(task);
+}
+
+void ResonanceAudioApiImpl::SetHeadRotation(float x, float y, float z,
+ float w) {
+ auto task = [this, w, x, y, z]() {
+ const WorldRotation head_rotation(w, x, y, z);
+ system_settings_.SetHeadRotation(head_rotation);
+ };
+ task_queue_.Post(task);
+}
+
+void ResonanceAudioApiImpl::SetMasterVolume(float volume) {
+ auto task = [this, volume]() { system_settings_.SetMasterGain(volume); };
+ task_queue_.Post(task);
+}
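All of the setters in this class follow the same pattern as the three above: capture the new values in a lambda and post it to |task_queue_|, with the queue presumably drained on the audio thread before each buffer is rendered (the destructor above also flushes it explicitly on shutdown). Below is a minimal sketch of that pattern, assuming a simple mutex-guarded queue in place of whatever task queue type the implementation actually uses.

#include <functional>
#include <mutex>
#include <queue>
#include <utility>

// Simplified stand-in for the task queue used above: setters post closures,
// and the audio thread executes them at a safe point before rendering, so
// parameter updates never race with buffer processing.
class ExampleTaskQueue {
 public:
  void Post(std::function<void()> task) {
    std::lock_guard<std::mutex> lock(mutex_);
    tasks_.push(std::move(task));
  }
  // Called from the rendering side to apply all pending updates.
  void Execute() {
    std::lock_guard<std::mutex> lock(mutex_);
    while (!tasks_.empty()) {
      tasks_.front()();
      tasks_.pop();
    }
  }

 private:
  std::mutex mutex_;
  std::queue<std::function<void()>> tasks_;
};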
+
+int ResonanceAudioApiImpl::CreateAmbisonicSource(size_t num_channels) {
+ if (num_channels < kNumFirstOrderAmbisonicChannels ||
+ !IsValidAmbisonicOrder(num_channels)) {
+ // Invalid number of input channels, don't create the ambisonic source.
+ LOG(ERROR) << "Invalid number of channels for the ambisonic source: "
+ << num_channels;
+ return kInvalidSourceId;
+ }
+
+ const int ambisonic_source_id = source_id_counter_.fetch_add(1);
+
+ const size_t num_valid_channels =
+ std::min(num_channels, graph_manager_->GetNumMaxAmbisonicChannels());
+ if (num_valid_channels < num_channels) {
+ LOG(WARNING) << "Number of ambisonic channels will be diminished to "
+ << num_valid_channels;
+ }
+
+ auto task = [this, ambisonic_source_id, num_valid_channels]() {
+ graph_manager_->CreateAmbisonicSource(ambisonic_source_id,
+ num_valid_channels);
+ system_settings_.GetSourceParametersManager()->Register(
+ ambisonic_source_id);
+ // Overwrite default source parameters for ambisonic source.
+ auto source_parameters =
+ system_settings_.GetSourceParametersManager()->GetMutableParameters(
+ ambisonic_source_id);
+ source_parameters->room_effects_gain = 0.0f;
+ source_parameters->distance_rolloff_model = DistanceRolloffModel::kNone;
+ source_parameters->distance_attenuation = 1.0f;
+ };
+ task_queue_.Post(task);
+ return ambisonic_source_id;
+}
+
+int ResonanceAudioApiImpl::CreateStereoSource(size_t num_channels) {
+ if (num_channels > kNumStereoChannels) {
+ LOG(ERROR) << "Unsupported number of input channels";
+ return kInvalidSourceId;
+ }
+ const int stereo_source_id = source_id_counter_.fetch_add(1);
+
+ auto task = [this, stereo_source_id]() {
+ graph_manager_->CreateStereoSource(stereo_source_id);
+ system_settings_.GetSourceParametersManager()->Register(stereo_source_id);
+ auto source_parameters =
+ system_settings_.GetSourceParametersManager()->GetMutableParameters(
+ stereo_source_id);
+ source_parameters->enable_hrtf = false;
+ };
+ task_queue_.Post(task);
+ return stereo_source_id;
+}
+
+int ResonanceAudioApiImpl::CreateSoundObjectSource(
+ RenderingMode rendering_mode) {
+ const int sound_object_source_id = source_id_counter_.fetch_add(1);
+
+ const auto config = GetSourceGraphConfigFromRenderingMode(rendering_mode);
+ auto task = [this, sound_object_source_id, config]() {
+ graph_manager_->CreateSoundObjectSource(
+ sound_object_source_id, config.ambisonic_order, config.enable_hrtf,
+ config.enable_direct_rendering);
+ system_settings_.GetSourceParametersManager()->Register(
+ sound_object_source_id);
+ auto source_parameters =
+ system_settings_.GetSourceParametersManager()->GetMutableParameters(
+ sound_object_source_id);
+ source_parameters->enable_hrtf = config.enable_hrtf;
+ };
+ task_queue_.Post(task);
+ return sound_object_source_id;
+}
+
+void ResonanceAudioApiImpl::DestroySource(SourceId source_id) {
+ auto task = [this, source_id]() {
+ graph_manager_->DestroySource(source_id);
+ system_settings_.GetSourceParametersManager()->Unregister(source_id);
+ };
+ task_queue_.Post(task);
+}
+
+void ResonanceAudioApiImpl::SetInterleavedBuffer(SourceId source_id,
+ const float* audio_buffer_ptr,
+ size_t num_channels,
+ size_t num_frames) {
+ SetSourceBuffer<const float*>(source_id, audio_buffer_ptr, num_channels,
+ num_frames);
+}
+
+void ResonanceAudioApiImpl::SetInterleavedBuffer(SourceId source_id,
+ const int16* audio_buffer_ptr,
+ size_t num_channels,
+ size_t num_frames) {
+ SetSourceBuffer<const int16*>(source_id, audio_buffer_ptr, num_channels,
+ num_frames);
+}
+
+void ResonanceAudioApiImpl::SetPlanarBuffer(
+ SourceId source_id, const float* const* audio_buffer_ptr,
+ size_t num_channels, size_t num_frames) {
+ SetSourceBuffer<const float* const*>(source_id, audio_buffer_ptr,
+ num_channels, num_frames);
+}
+
+void ResonanceAudioApiImpl::SetPlanarBuffer(
+ SourceId source_id, const int16* const* audio_buffer_ptr,
+ size_t num_channels, size_t num_frames) {
+ SetSourceBuffer<const int16* const*>(source_id, audio_buffer_ptr,
+ num_channels, num_frames);
+}
+
+void ResonanceAudioApiImpl::SetSourceDistanceAttenuation(
+ SourceId source_id, float distance_attenuation) {
+ auto task = [this, source_id, distance_attenuation]() {
+ auto source_parameters =
+ system_settings_.GetSourceParametersManager()->GetMutableParameters(
+ source_id);
+ if (source_parameters != nullptr) {
+ const auto& rolloff_model = source_parameters->distance_rolloff_model;
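+ // An explicit attenuation value only takes effect while the distance
+ // rolloff model is |kNone|; any other model recomputes and overwrites it,
+ // hence the warning below.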
+ DCHECK_EQ(rolloff_model, DistanceRolloffModel::kNone);
+ if (rolloff_model != DistanceRolloffModel::kNone) {
+ LOG(WARNING) << "Implicit distance rolloff model is set. The value "
+ "will be overwritten.";
+ }
+ source_parameters->distance_attenuation = distance_attenuation;
+ }
+ };
+ task_queue_.Post(task);
+}
+
+void ResonanceAudioApiImpl::SetSourceDistanceModel(SourceId source_id,
+ DistanceRolloffModel rolloff,
+ float min_distance,
+ float max_distance) {
+ if (max_distance < min_distance && rolloff != DistanceRolloffModel::kNone) {
+ LOG(WARNING) << "max_distance must be larger than min_distance";
+ return;
+ }
+ auto task = [this, source_id, rolloff, min_distance, max_distance]() {
+ auto source_parameters =
+ system_settings_.GetSourceParametersManager()->GetMutableParameters(
+ source_id);
+ if (source_parameters != nullptr) {
+ source_parameters->distance_rolloff_model = rolloff;
+ source_parameters->minimum_distance = min_distance;
+ source_parameters->maximum_distance = max_distance;
+ }
+ };
+ task_queue_.Post(task);
+}
+
+void ResonanceAudioApiImpl::SetSourcePosition(SourceId source_id, float x,
+ float y, float z) {
+ const WorldPosition position(x, y, z);
+ auto task = [this, source_id, position]() {
+ auto source_parameters =
+ system_settings_.GetSourceParametersManager()->GetMutableParameters(
+ source_id);
+ if (source_parameters != nullptr) {
+ source_parameters->object_transform.position = position;
+ }
+ };
+ task_queue_.Post(task);
+}
+
+void ResonanceAudioApiImpl::SetSourceRoomEffectsGain(SourceId source_id,
+ float room_effects_gain) {
+ auto task = [this, source_id, room_effects_gain]() {
+ auto source_parameters =
+ system_settings_.GetSourceParametersManager()->GetMutableParameters(
+ source_id);
+ if (source_parameters != nullptr) {
+ source_parameters->room_effects_gain = room_effects_gain;
+ }
+ };
+ task_queue_.Post(task);
+}
+
+void ResonanceAudioApiImpl::SetSourceRotation(SourceId source_id, float x,
+ float y, float z, float w) {
+ const WorldRotation rotation(w, x, y, z);
+ auto task = [this, source_id, rotation]() {
+ auto source_parameters =
+ system_settings_.GetSourceParametersManager()->GetMutableParameters(
+ source_id);
+ if (source_parameters != nullptr) {
+ source_parameters->object_transform.rotation = rotation;
+ }
+ };
+ task_queue_.Post(task);
+}
+
+void ResonanceAudioApiImpl::SetSourceVolume(SourceId source_id, float volume) {
+ auto task = [this, source_id, volume]() {
+ auto source_parameters =
+ system_settings_.GetSourceParametersManager()->GetMutableParameters(
+ source_id);
+ if (source_parameters != nullptr) {
+ source_parameters->gain = volume;
+ }
+ };
+ task_queue_.Post(task);
+}
+
+void ResonanceAudioApiImpl::SetSoundObjectDirectivity(
+ SourceId sound_object_source_id, float alpha, float order) {
+ auto task = [this, sound_object_source_id, alpha, order]() {
+ auto source_parameters =
+ system_settings_.GetSourceParametersManager()->GetMutableParameters(
+ sound_object_source_id);
+ if (source_parameters != nullptr) {
+ source_parameters->directivity_alpha = alpha;
+ source_parameters->directivity_order = order;
+ }
+ };
+ task_queue_.Post(task);
+}
+
+void ResonanceAudioApiImpl::SetSoundObjectListenerDirectivity(
+ SourceId sound_object_source_id, float alpha, float order) {
+ auto task = [this, sound_object_source_id, alpha, order]() {
+ auto source_parameters =
+ system_settings_.GetSourceParametersManager()->GetMutableParameters(
+ sound_object_source_id);
+ if (source_parameters != nullptr) {
+ source_parameters->listener_directivity_alpha = alpha;
+ source_parameters->listener_directivity_order = order;
+ }
+ };
+ task_queue_.Post(task);
+}
+
+void ResonanceAudioApiImpl::SetSoundObjectNearFieldEffectGain(
+ SourceId sound_object_source_id, float gain) {
+ auto task = [this, sound_object_source_id, gain]() {
+ auto source_parameters =
+ system_settings_.GetSourceParametersManager()->GetMutableParameters(
+ sound_object_source_id);
+ if (source_parameters != nullptr) {
+ source_parameters->near_field_gain = gain;
+ }
+ };
+ task_queue_.Post(task);
+}
+
+void ResonanceAudioApiImpl::SetSoundObjectOcclusionIntensity(
+ SourceId sound_object_source_id, float intensity) {
+ auto task = [this, sound_object_source_id, intensity]() {
+ auto source_parameters =
+ system_settings_.GetSourceParametersManager()->GetMutableParameters(
+ sound_object_source_id);
+ if (source_parameters != nullptr) {
+ source_parameters->occlusion_intensity = intensity;
+ }
+ };
+ task_queue_.Post(task);
+}
+
+void ResonanceAudioApiImpl::SetSoundObjectSpread(
+ SourceId sound_object_source_id, float spread_deg) {
+ auto task = [this, sound_object_source_id, spread_deg]() {
+ auto source_parameters =
+ system_settings_.GetSourceParametersManager()->GetMutableParameters(
+ sound_object_source_id);
+ if (source_parameters != nullptr) {
+ source_parameters->spread_deg = spread_deg;
+ }
+ };
+ task_queue_.Post(task);
+}
+
+void ResonanceAudioApiImpl::EnableRoomEffects(bool enable) {
+ auto task = [this, enable]() { graph_manager_->EnableRoomEffects(enable); };
+ task_queue_.Post(task);
+}
+
+void ResonanceAudioApiImpl::SetReflectionProperties(
+ const ReflectionProperties& reflection_properties) {
+ auto task = [this, reflection_properties]() {
+ system_settings_.SetReflectionProperties(reflection_properties);
+ };
+ task_queue_.Post(task);
+}
+
+void ResonanceAudioApiImpl::SetReverbProperties(
+ const ReverbProperties& reverb_properties) {
+ auto task = [this, reverb_properties]() {
+ system_settings_.SetReverbProperties(reverb_properties);
+ };
+ task_queue_.Post(task);
+}
+
+const AudioBuffer* ResonanceAudioApiImpl::GetAmbisonicOutputBuffer() const {
+ return graph_manager_->GetAmbisonicBuffer();
+}
+
+const AudioBuffer* ResonanceAudioApiImpl::GetStereoOutputBuffer() const {
+ return graph_manager_->GetStereoBuffer();
+}
+
+const AudioBuffer *ResonanceAudioApiImpl::GetReverbBuffer() const {
+ return graph_manager_->GetReverbBuffer();
+}
+
+void ResonanceAudioApiImpl::ProcessNextBuffer() {
+#if defined(ENABLE_TRACING) && !ION_PRODUCTION
+ // This enables tracing on the audio thread.
+ auto task = []() { ENABLE_TRACING_ON_CURRENT_THREAD("AudioThread"); };
+ task_queue_.Post(task);
+#endif // defined(ENABLE_TRACING) && !ION_PRODUCTION
+
+ task_queue_.Execute();
+
+ // Update room effects only if the room effects pipeline is enabled.
+ if (graph_manager_->GetRoomEffectsEnabled()) {
+ graph_manager_->UpdateRoomReflections();
+ graph_manager_->UpdateRoomReverb();
+ }
+ // Update source attenuation parameters.
+ const auto process = [this](SourceParameters* parameters) {
+ const float master_gain = system_settings_.GetMasterGain();
+ const auto& listener_position = system_settings_.GetHeadPosition();
+ const auto& reflection_properties =
+ system_settings_.GetReflectionProperties();
+ const auto& reverb_properties = system_settings_.GetReverbProperties();
+ UpdateAttenuationParameters(master_gain, reflection_properties.gain,
+ reverb_properties.gain, listener_position,
+ parameters);
+ };
+ system_settings_.GetSourceParametersManager()->ProcessAllParameters(process);
+
+ graph_manager_->Process();
+}
+
+void ResonanceAudioApiImpl::SetStereoSpeakerMode(bool enabled) {
+ auto task = [this, enabled]() {
+ system_settings_.SetStereoSpeakerMode(enabled);
+ };
+ task_queue_.Post(task);
+}
+
+template <typename OutputType>
+bool ResonanceAudioApiImpl::FillOutputBuffer(size_t num_channels,
+ size_t num_frames,
+ OutputType buffer_ptr) {
+ if (buffer_ptr == nullptr) {
+ LOG(WARNING) << kBadInputPointerMessage;
+ return false;
+ }
+ if (num_channels != kNumStereoChannels) {
+ LOG(WARNING) << "Output buffer must be stereo";
+ return false;
+ }
+ const size_t num_input_samples = num_frames * num_channels;
+ const size_t num_expected_output_samples =
+ system_settings_.GetFramesPerBuffer() * system_settings_.GetNumChannels();
+ if (num_input_samples != num_expected_output_samples) {
+ LOG(WARNING) << "Output buffer size must be " << num_expected_output_samples
+ << " samples";
+ return false;
+ }
+
+ // Get the processed output buffer.
+ ProcessNextBuffer();
+ const AudioBuffer* output_buffer = GetStereoOutputBuffer();
+ if (output_buffer == nullptr) {
+ // This indicates that the graph processing is triggered without having any
+ // connected sources.
+ return false;
+ }
+
+ FillExternalBuffer(*output_buffer, buffer_ptr, num_frames, num_channels);
+ return true;
+}
+
+template <typename SampleType>
+void ResonanceAudioApiImpl::SetSourceBuffer(SourceId source_id,
+ SampleType audio_buffer_ptr,
+ size_t num_input_channels,
+ size_t num_frames) {
+ // Execute task queue to ensure newly created sound sources are initialized.
+ task_queue_.Execute();
+
+ if (audio_buffer_ptr == nullptr) {
+ LOG(WARNING) << kBadInputPointerMessage;
+ return;
+ }
+ if (num_frames != system_settings_.GetFramesPerBuffer()) {
+ LOG(WARNING) << kBufferSizeMustMatchNumFramesMessage;
+ return;
+ }
+
+ AudioBuffer* const output_buffer =
+ graph_manager_->GetMutableAudioBuffer(source_id);
+ if (output_buffer == nullptr) {
+ LOG(WARNING) << "Source audio buffer not found";
+ return;
+ }
+ const size_t num_output_channels = output_buffer->num_channels();
+
+ if (num_input_channels == num_output_channels) {
+ FillAudioBuffer(audio_buffer_ptr, num_frames, num_input_channels,
+ output_buffer);
+
+ return;
+ }
+
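+ // A mono input feeding a stereo source is duplicated into both output
+ // channels via the {0, 0} channel map.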
+ if ((num_input_channels == kNumMonoChannels) &&
+ (num_output_channels == kNumStereoChannels)) {
+ FillAudioBufferWithChannelRemapping(
+ audio_buffer_ptr, num_frames, num_input_channels,
+ {0, 0} /* channel_map */, output_buffer);
+ return;
+ }
+
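+ // If the input carries more channels than the source accepts, only the
+ // first |num_output_channels| channels are copied and the rest are dropped.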
+ if (num_input_channels > num_output_channels) {
+ std::vector<size_t> channel_map(num_output_channels);
+ // Fill channel map with increasing indices.
+ std::iota(std::begin(channel_map), std::end(channel_map), 0);
+ FillAudioBufferWithChannelRemapping(audio_buffer_ptr, num_frames,
+ num_input_channels, channel_map,
+ output_buffer);
+ return;
+ }
+
+ LOG(WARNING) << "Number of input channels does not match the number of "
+ "output channels";
+}
+
+} // namespace vraudio
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/resonance_audio_api_impl.h b/src/3rdparty/resonance-audio/resonance_audio/graph/resonance_audio_api_impl.h
new file mode 100644
index 000000000..0adbc1c67
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/resonance_audio_api_impl.h
@@ -0,0 +1,175 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#ifndef RESONANCE_AUDIO_GRAPH_RESONANCE_AUDIO_API_IMPL_H_
+#define RESONANCE_AUDIO_GRAPH_RESONANCE_AUDIO_API_IMPL_H_
+
+#include <atomic>
+#include <memory>
+#include <vector>
+
+#include "base/integral_types.h"
+#include "api/resonance_audio_api.h"
+#include "base/audio_buffer.h"
+#include "graph/graph_manager.h"
+#include "graph/system_settings.h"
+#include "utils/lockless_task_queue.h"
+
+namespace vraudio {
+
+// Implementation of ResonanceAudioApi interface.
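+//
+// Illustrative usage sketch (not part of the original sources): |mono_input|
+// and |stereo_output| are caller-owned float buffers, and the rendering mode
+// value is assumed from the public API.
+//
+//   ResonanceAudioApiImpl api(/*num_channels=*/2, /*frames_per_buffer=*/256,
+//                             /*sample_rate_hz=*/48000);
+//   const SourceId source =
+//       api.CreateSoundObjectSource(RenderingMode::kBinauralHighQuality);
+//   api.SetSourcePosition(source, 1.0f, 0.0f, 0.0f);
+//   // Called once per audio callback:
+//   api.SetInterleavedBuffer(source, mono_input, /*num_channels=*/1,
+//                            /*num_frames=*/256);
+//   api.FillInterleavedOutputBuffer(/*num_channels=*/2, /*num_frames=*/256,
+//                                   stereo_output);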
+class ResonanceAudioApiImpl : public ResonanceAudioApi {
+ public:
+ // Constructor that initializes |ResonanceAudioApi| with system configuration.
+ //
+ // @param num_channels Number of channels of audio output.
+ // @param frames_per_buffer Number of frames per buffer.
+ // @param sample_rate_hz System sample rate.
+ ResonanceAudioApiImpl(size_t num_channels, size_t frames_per_buffer,
+ int sample_rate_hz);
+
+ ~ResonanceAudioApiImpl() override;
+
+ //////////////////////////////////
+ // ResonanceAudioApi implementation.
+ //////////////////////////////////
+
+ // Obtain processed output buffers.
+ bool FillInterleavedOutputBuffer(size_t num_channels, size_t num_frames,
+ float* buffer_ptr) override;
+ bool FillInterleavedOutputBuffer(size_t num_channels, size_t num_frames,
+ int16* buffer_ptr) override;
+ bool FillPlanarOutputBuffer(size_t num_channels, size_t num_frames,
+ float* const* buffer_ptr) override;
+ bool FillPlanarOutputBuffer(size_t num_channels, size_t num_frames,
+ int16* const* buffer_ptr) override;
+
+ // Listener configuration.
+ void SetHeadPosition(float x, float y, float z) override;
+ void SetHeadRotation(float x, float y, float z, float w) override;
+ void SetMasterVolume(float volume) override;
+ void SetStereoSpeakerMode(bool enabled) override;
+
+ // Create and destroy sources.
+ SourceId CreateAmbisonicSource(size_t num_channels) override;
+ SourceId CreateStereoSource(size_t num_channels) override;
+ SourceId CreateSoundObjectSource(RenderingMode rendering_mode) override;
+ void DestroySource(SourceId source_id) override;
+
+ // Set source data.
+ void SetInterleavedBuffer(SourceId source_id, const float* audio_buffer_ptr,
+ size_t num_channels, size_t num_frames) override;
+ void SetInterleavedBuffer(SourceId source_id, const int16* audio_buffer_ptr,
+ size_t num_channels, size_t num_frames) override;
+ void SetPlanarBuffer(SourceId source_id, const float* const* audio_buffer_ptr,
+ size_t num_channels, size_t num_frames) override;
+ void SetPlanarBuffer(SourceId source_id, const int16* const* audio_buffer_ptr,
+ size_t num_channels, size_t num_frames) override;
+
+ // Source configuration.
+ void SetSourceDistanceAttenuation(SourceId source_id,
+ float distance_attenuation) override;
+ void SetSourceDistanceModel(SourceId source_id, DistanceRolloffModel rolloff,
+ float min_distance, float max_distance) override;
+ void SetSourcePosition(SourceId source_id, float x, float y,
+ float z) override;
+ void SetSourceRoomEffectsGain(SourceId source_id,
+ float room_effects_gain) override;
+ void SetSourceRotation(SourceId source_id, float x, float y, float z,
+ float w) override;
+ void SetSourceVolume(SourceId source_id, float volume) override;
+
+ // Sound object configuration.
+ void SetSoundObjectDirectivity(SourceId sound_object_source_id, float alpha,
+ float order) override;
+ void SetSoundObjectListenerDirectivity(SourceId sound_object_source_id,
+ float alpha, float order) override;
+ void SetSoundObjectNearFieldEffectGain(SourceId sound_object_source_id,
+ float gain) override;
+ void SetSoundObjectOcclusionIntensity(SourceId sound_object_source_id,
+ float intensity) override;
+ void SetSoundObjectSpread(SourceId sound_object_source_id,
+ float spread_deg) override;
+
+ // Room effects configuration.
+ void EnableRoomEffects(bool enable) override;
+ void SetReflectionProperties(
+ const ReflectionProperties& reflection_properties) override;
+ void SetReverbProperties(const ReverbProperties& reverb_properties) override;
+
+ //////////////////////////////////
+ // Internal API methods.
+ //////////////////////////////////
+
+ // Returns the last processed output buffer of the ambisonic mix.
+ //
+ // @return Pointer to ambisonic output buffer.
+ const AudioBuffer* GetAmbisonicOutputBuffer() const;
+
+ // Returns the last processed output buffer of the stereo (binaural) mix.
+ //
+ // @return Pointer to stereo output buffer.
+ const AudioBuffer* GetStereoOutputBuffer() const;
+
+ // Returns the last processed buffer containing stereo data for the room reverb.
+ //
+ // @return Pointer to room reverb stereo buffer.
+ const AudioBuffer* GetReverbBuffer() const;
+
+ // Triggers processing of the audio graph with the updated system properties.
+ void ProcessNextBuffer();
+
+ private:
+ // This method triggers the processing of the audio graph and renders the
+ // result into a binaural stereo output buffer.
+ //
+ // @tparam OutputType Output sample format, only float and int16 are
+ // supported.
+ // @param num_channels Number of channels in output buffer.
+ // @param num_frames Size of buffer in frames.
+ // @param buffer_ptr Raw pointer to audio buffer.
+ // @return True if a valid output was successfully rendered, false otherwise.
+ template <typename OutputType>
+ bool FillOutputBuffer(size_t num_channels, size_t num_frames,
+ OutputType buffer_ptr);
+
+ // Sets the next audio buffer to a sound source.
+ //
+ // @param source_id Id of sound source.
+ // @param audio_buffer_ptr Pointer to planar or interleaved audio buffer.
+ // @param num_input_channels Number of input channels.
+ // @param num_frames Number of frames per channel / audio buffer.
+ template <typename SampleType>
+ void SetSourceBuffer(SourceId source_id, SampleType audio_buffer_ptr,
+ size_t num_input_channels, size_t num_frames);
+
+ // Graph manager used to create and destroy sound objects.
+ std::unique_ptr<GraphManager> graph_manager_;
+
+ // Manages system wide settings.
+ SystemSettings system_settings_;
+
+ // Task queue that queues up manipulations of all the entities in the system.
+ // All tasks are executed on the audio thread.
+ LocklessTaskQueue task_queue_;
+
+ // Incremental source id counter.
+ std::atomic<int> source_id_counter_;
+};
+
+} // namespace vraudio
+
+#endif // RESONANCE_AUDIO_GRAPH_RESONANCE_AUDIO_API_IMPL_H_
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/reverb_node.cc b/src/3rdparty/resonance-audio/resonance_audio/graph/reverb_node.cc
new file mode 100644
index 000000000..6a9999881
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/reverb_node.cc
@@ -0,0 +1,162 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "graph/reverb_node.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include "base/constants_and_types.h"
+#include "base/logging.h"
+
+
+namespace vraudio {
+
+namespace {
+
+// Default time in seconds to update the rt60s over.
+const float kUpdateTimeSeconds = 1.0f;
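+// The constructor below spreads parameter changes over
+// sample_rate_hz * kUpdateTimeSeconds / frames_per_buffer buffers, e.g.
+// 48000 * 1.0 / 512 = 93.75 buffers at 48 kHz with 512-frame buffers.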
+
+// Interpolates |*current| towards |target| in steps of |update_step|, and sets
+// |*current| to |target| once the difference between them is no greater than
+// |update_step|.
+inline void InterpolateFloatParam(float update_step, float target,
+ float* current) {
+ if (std::abs(target - *current) <= std::abs(update_step)) {
+ *current = target;
+ } else {
+ *current += update_step;
+ }
+}
+
+} // namespace
+
+ReverbNode::ReverbNode(const SystemSettings& system_settings,
+ FftManager* fft_manager)
+ : system_settings_(system_settings),
+ rt60_band_update_steps_(kNumReverbOctaveBands, 0.0f),
+ gain_update_step_(0.0f),
+ rt60_updating_(false),
+ gain_updating_(false),
+ buffers_to_update_(
+ static_cast<float>(system_settings_.GetSampleRateHz()) *
+ kUpdateTimeSeconds /
+ static_cast<float>(system_settings_.GetFramesPerBuffer())),
+ spectral_reverb_(system_settings_.GetSampleRateHz(),
+ system_settings_.GetFramesPerBuffer()),
+ onset_compensator_(system_settings_.GetSampleRateHz(),
+ system_settings_.GetFramesPerBuffer(), fft_manager),
+ num_frames_processed_on_empty_input_(0),
+ reverb_length_frames_(0),
+ output_buffer_(kNumStereoChannels, system_settings_.GetFramesPerBuffer()),
+ compensator_output_buffer_(kNumStereoChannels,
+ system_settings_.GetFramesPerBuffer()),
+ silence_mono_buffer_(kNumMonoChannels,
+ system_settings_.GetFramesPerBuffer()) {
+ EnableProcessOnEmptyInput(true);
+ output_buffer_.Clear();
+ silence_mono_buffer_.Clear();
+ Update();
+}
+
+void ReverbNode::Update() {
+ new_reverb_properties_ = system_settings_.GetReverbProperties();
+
+ rt60_updating_ = !EqualSafe(std::begin(reverb_properties_.rt60_values),
+ std::end(reverb_properties_.rt60_values),
+ std::begin(new_reverb_properties_.rt60_values),
+ std::end(new_reverb_properties_.rt60_values));
+ if (rt60_updating_) {
+ for (size_t i = 0; i < kNumReverbOctaveBands; ++i) {
+ rt60_band_update_steps_[i] = (new_reverb_properties_.rt60_values[i] -
+ reverb_properties_.rt60_values[i]) /
+ buffers_to_update_;
+ }
+ }
+ // Update the reverb gain if necessary.
+ gain_updating_ = reverb_properties_.gain != new_reverb_properties_.gain;
+ if (gain_updating_) {
+ gain_update_step_ =
+ (new_reverb_properties_.gain - reverb_properties_.gain) /
+ buffers_to_update_;
+ }
+}
+
+const AudioBuffer* ReverbNode::GetOutputBuffer() const
+{
+ return &output_buffer_;
+}
+
+const AudioBuffer* ReverbNode::AudioProcess(const NodeInput& input) {
+ if (rt60_updating_) {
+ for (size_t i = 0; i < kNumReverbOctaveBands; ++i) {
+ InterpolateFloatParam(rt60_band_update_steps_[i],
+ new_reverb_properties_.rt60_values[i],
+ &reverb_properties_.rt60_values[i]);
+ }
+ spectral_reverb_.SetRt60PerOctaveBand(reverb_properties_.rt60_values);
+ const auto max_rt_it =
+ std::max_element(std::begin(reverb_properties_.rt60_values),
+ std::end(reverb_properties_.rt60_values));
+ reverb_length_frames_ = static_cast<size_t>(
+ *max_rt_it * static_cast<float>(system_settings_.GetSampleRateHz()));
+ onset_compensator_.Update(reverb_properties_.rt60_values,
+ reverb_properties_.gain);
+ // |InterpolateFloatParam| will set the two values below to be equal on
+ // completion of interpolation.
+ rt60_updating_ = !EqualSafe(std::begin(reverb_properties_.rt60_values),
+ std::end(reverb_properties_.rt60_values),
+ std::begin(new_reverb_properties_.rt60_values),
+ std::end(new_reverb_properties_.rt60_values));
+ }
+
+ if (gain_updating_) {
+ InterpolateFloatParam(gain_update_step_, new_reverb_properties_.gain,
+ &reverb_properties_.gain);
+ spectral_reverb_.SetGain(reverb_properties_.gain);
+ onset_compensator_.Update(reverb_properties_.rt60_values,
+ reverb_properties_.gain);
+ // |InterpolateFloatParam| will set the two values below to be equal on
+ // completion of interpolation.
+ gain_updating_ = reverb_properties_.gain != new_reverb_properties_.gain;
+ }
+
+ const AudioBuffer* input_buffer = input.GetSingleInput();
+ if (input_buffer == nullptr) {
+ // If we have no input, generate a silent input buffer until the node states
+ // are cleared.
+ if (num_frames_processed_on_empty_input_ < reverb_length_frames_) {
+ const size_t num_frames = system_settings_.GetFramesPerBuffer();
+ num_frames_processed_on_empty_input_ += num_frames;
+ spectral_reverb_.Process(silence_mono_buffer_[0], &output_buffer_[0],
+ &output_buffer_[1]);
+ return &output_buffer_;
+ } else {
+ // Skip processing entirely when the states are fully cleared.
+ return nullptr;
+ }
+ }
+ DCHECK_EQ(input_buffer->num_channels(), kNumMonoChannels);
+ num_frames_processed_on_empty_input_ = 0;
+ spectral_reverb_.Process((*input_buffer)[0], &output_buffer_[0],
+ &output_buffer_[1]);
+ onset_compensator_.Process(*input_buffer, &compensator_output_buffer_);
+ output_buffer_[0] += compensator_output_buffer_[0];
+ output_buffer_[1] += compensator_output_buffer_[1];
+ return &output_buffer_;
+}
+
+} // namespace vraudio
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/reverb_node.h b/src/3rdparty/resonance-audio/resonance_audio/graph/reverb_node.h
new file mode 100644
index 000000000..f9921fa66
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/reverb_node.h
@@ -0,0 +1,99 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#ifndef RESONANCE_AUDIO_GRAPH_REVERB_NODE_H_
+#define RESONANCE_AUDIO_GRAPH_REVERB_NODE_H_
+
+#include "api/resonance_audio_api.h"
+#include "base/audio_buffer.h"
+#include "dsp/fft_manager.h"
+#include "dsp/reverb_onset_compensator.h"
+#include "dsp/spectral_reverb.h"
+#include "graph/system_settings.h"
+#include "node/processing_node.h"
+
+namespace vraudio {
+
+// Implements a spectral reverb producing a decorrelated stereo output with
+// onset compensated by a pair of convolution filters.
+class ReverbNode : public ProcessingNode {
+ public:
+ // Constructs a |ReverbNode|.
+ //
+ // @param system_settings Global system configuration.
+ // @param fft_manager Pointer to a manager to perform FFT transformations.
+ ReverbNode(const SystemSettings& system_settings, FftManager* fft_manager);
+
+ // Updates the |SpectralReverb| using the current room properties or RT60
+ // values depending on the system settings.
+ void Update();
+
+ const AudioBuffer *GetOutputBuffer() const;
+
+ protected:
+ // Implements ProcessingNode.
+ const AudioBuffer* AudioProcess(const NodeInput& input) override;
+
+ private:
+ // Global system configuration.
+ const SystemSettings& system_settings_;
+
+ // Current reverb properties.
+ ReverbProperties reverb_properties_;
+
+ // New reverb properties.
+ ReverbProperties new_reverb_properties_;
+
+ // Per band reverb time update step sizes.
+ std::vector<float> rt60_band_update_steps_;
+
+ // Update step size for the gain parameter.
+ float gain_update_step_;
+
+ // Denotes whether the rt60s are currently being updated.
+ bool rt60_updating_;
+
+ // Denotes whether the gain is currently being updated.
+ bool gain_updating_;
+
+ // Number of buffers to update the rt60s over.
+ float buffers_to_update_;
+
+ // DSP class to perform filtering associated with the reverb.
+ SpectralReverb spectral_reverb_;
+
+ // DSP class to perform spectral reverb onset compensation.
+ ReverbOnsetCompensator onset_compensator_;
+
+ // Number of frames of zeroed-out data processed by the node since its input
+ // ceased, used to ensure the entire reverb tail is rendered.
+ size_t num_frames_processed_on_empty_input_;
+
+ // Longest current reverb time, across all bands, in frames.
+ size_t reverb_length_frames_;
+
+ // Output buffers for mixing spectral reverb and compensator output.
+ AudioBuffer output_buffer_;
+ AudioBuffer compensator_output_buffer_;
+
+ // Silent mono buffer used to render reverb tails while no input buffers are
+ // present.
+ AudioBuffer silence_mono_buffer_;
+};
+
+} // namespace vraudio
+
+#endif // RESONANCE_AUDIO_GRAPH_REVERB_NODE_H_
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/source_graph_config.h b/src/3rdparty/resonance-audio/resonance_audio/graph/source_graph_config.h
new file mode 100644
index 000000000..513f9b087
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/source_graph_config.h
@@ -0,0 +1,43 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#ifndef RESONANCE_AUDIO_GRAPH_SOURCE_GRAPH_CONFIG_H_
+#define RESONANCE_AUDIO_GRAPH_SOURCE_GRAPH_CONFIG_H_
+
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace vraudio {
+
+// Configuration of a source and the nodes it is instantiating.
+struct SourceGraphConfig {
+ // Configuration name.
+ std::string configuration_name;
+
+ // Ambisonic order to encode to/decode from source.
+ int ambisonic_order = 1;
+
+ // Flag to enable HRTF-based rendering of source.
+ bool enable_hrtf = true;
+
+ // Flag to enable direct rendering of source.
+ bool enable_direct_rendering = true;
+};
+
+} // namespace vraudio
+
+#endif // RESONANCE_AUDIO_GRAPH_SOURCE_GRAPH_CONFIG_H_
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/source_parameters_manager.cc b/src/3rdparty/resonance-audio/resonance_audio/graph/source_parameters_manager.cc
new file mode 100644
index 000000000..edc903cc5
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/source_parameters_manager.cc
@@ -0,0 +1,58 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "graph/source_parameters_manager.h"
+
+#include "base/logging.h"
+
+namespace vraudio {
+
+void SourceParametersManager::Register(SourceId source_id) {
+ DCHECK(parameters_.find(source_id) == parameters_.end());
+ parameters_[source_id] = SourceParameters();
+}
+
+void SourceParametersManager::Unregister(SourceId source_id) {
+ parameters_.erase(source_id);
+}
+
+const SourceParameters* SourceParametersManager::GetParameters(
+ SourceId source_id) const {
+ const auto source_parameters_itr = parameters_.find(source_id);
+ if (source_parameters_itr == parameters_.end()) {
+ LOG(ERROR) << "Source " << source_id << " not found";
+ return nullptr;
+ }
+ return &source_parameters_itr->second;
+}
+
+SourceParameters* SourceParametersManager::GetMutableParameters(
+ SourceId source_id) {
+ auto source_parameters_itr = parameters_.find(source_id);
+ if (source_parameters_itr == parameters_.end()) {
+ LOG(ERROR) << "Source " << source_id << " not found";
+ return nullptr;
+ }
+ return &source_parameters_itr->second;
+}
+
+void SourceParametersManager::ProcessAllParameters(const Process& process) {
+ for (auto& source_parameters_itr : parameters_) {
+ process(&source_parameters_itr.second);
+ }
+}
+
+} // namespace vraudio
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/source_parameters_manager.h b/src/3rdparty/resonance-audio/resonance_audio/graph/source_parameters_manager.h
new file mode 100644
index 000000000..a6b394fd7
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/source_parameters_manager.h
@@ -0,0 +1,68 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#ifndef RESONANCE_AUDIO_GRAPH_SOURCE_PARAMETERS_MANAGER_H_
+#define RESONANCE_AUDIO_GRAPH_SOURCE_PARAMETERS_MANAGER_H_
+
+#include <functional>
+#include <unordered_map>
+
+#include "base/constants_and_types.h"
+#include "base/source_parameters.h"
+
+namespace vraudio {
+
+// Class that manages the corresponding parameters of each registered source.
+class SourceParametersManager {
+ public:
+ // Alias for the parameters process closure type.
+ using Process = std::function<void(SourceParameters*)>;
+
+ // Registers new source parameters for given |source_id|.
+ //
+ // @param source_id Source id.
+ void Register(SourceId source_id);
+
+ // Unregisters the source parameters for given |source_id|.
+ //
+ // @param source_id Source id.
+ void Unregister(SourceId source_id);
+
+ // Returns read-only source parameters for given |source_id|.
+ //
+ // @param source_id Source id.
+ // @return Read-only source parameters, nullptr if |source_id| not found.
+ const SourceParameters* GetParameters(SourceId source_id) const;
+
+ // Returns mutable source parameters for given |source_id|.
+ //
+ // @param source_id Source id.
+ // @return Mutable source parameters, nullptr if |source_id| not found.
+ SourceParameters* GetMutableParameters(SourceId source_id);
+
+ // Executes given |process| for the parameters of each registered source.
+ //
+ // @param process Parameters processing method.
+ void ProcessAllParameters(const Process& process);
+
+ private:
+ // Registered source parameters.
+ std::unordered_map<SourceId, SourceParameters> parameters_;
+};
+
+} // namespace vraudio
+
+#endif // RESONANCE_AUDIO_GRAPH_SOURCE_PARAMETERS_MANAGER_H_
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/source_parameters_manager_test.cc b/src/3rdparty/resonance-audio/resonance_audio/graph/source_parameters_manager_test.cc
new file mode 100644
index 000000000..59c69eaed
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/source_parameters_manager_test.cc
@@ -0,0 +1,91 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "graph/source_parameters_manager.h"
+
+#include "third_party/googletest/googletest/include/gtest/gtest.h"
+
+namespace vraudio {
+
+namespace {
+
+// Tests that the manager registers/unregisters source parameters as expected
+// for given arbitrary source ids.
+TEST(SourceParametersManagerTest, RegisterUnregisterTest) {
+ const SourceId kSourceIds[] = {0, 1, 5, 10};
+
+ // Initialize a new |SourceParametersManager|.
+ SourceParametersManager source_parameters_manager;
+ for (const auto source_id : kSourceIds) {
+ // Verify that no parameters are registered for given |source_id|.
+ EXPECT_TRUE(source_parameters_manager.GetParameters(source_id) == nullptr);
+ // Verify that the parameters are initialized after |Register|.
+ source_parameters_manager.Register(source_id);
+ EXPECT_FALSE(source_parameters_manager.GetParameters(source_id) == nullptr);
+ // Verify that the parameters are destroyed after |Unregister|.
+ source_parameters_manager.Unregister(source_id);
+ EXPECT_TRUE(source_parameters_manager.GetParameters(source_id) == nullptr);
+ }
+}
+
+// Tests that the manager correctly applies and returns parameter values of a
+// source for a given arbitrary modifier.
+TEST(SourceParametersManagerTest, ParametersAccessTest) {
+ const SourceId kSourceId = 1;
+ const float kSourceGain = 0.25f;
+
+ // Initialize a new |SourceParametersManager| and register the source.
+ SourceParametersManager source_parameters_manager;
+ source_parameters_manager.Register(kSourceId);
+ // Modify the gain parameter.
+ auto mutable_parameters =
+ source_parameters_manager.GetMutableParameters(kSourceId);
+ EXPECT_TRUE(mutable_parameters != nullptr);
+ mutable_parameters->gain = kSourceGain;
+ // Access the parameters to verify the gain value was applied correctly.
+ const auto parameters = source_parameters_manager.GetParameters(kSourceId);
+ EXPECT_TRUE(parameters != nullptr);
+ EXPECT_EQ(kSourceGain, parameters->gain);
+}
+
+// Tests that the manager correctly executes a given arbitrary call to process
+// all parameters for all the sources contained within.
+TEST(SourceParametersManagerTest, ProcessAllParametersTest) {
+ const SourceId kSourceIds[] = {0, 1, 2, 3, 4, 5};
+ const float kDistanceAttenuation = 0.75f;
+ const auto kProcess = [kDistanceAttenuation](SourceParameters* parameters) {
+ parameters->distance_attenuation = kDistanceAttenuation;
+ };
+
+ // Initialize a new |SourceParametersManager| and register all the sources.
+ SourceParametersManager source_parameters_manager;
+ for (const auto source_id : kSourceIds) {
+ source_parameters_manager.Register(source_id);
+ }
+ // Process all parameters to apply the distance attenuation.
+ source_parameters_manager.ProcessAllParameters(kProcess);
+ // Verify that the distance attenuation value was applied correctly to all the
+ // sources.
+ for (const auto source_id : kSourceIds) {
+ const auto parameters = source_parameters_manager.GetParameters(source_id);
+ EXPECT_TRUE(parameters != nullptr);
+ EXPECT_EQ(kDistanceAttenuation, parameters->distance_attenuation);
+ }
+}
+
+} // namespace
+
+} // namespace vraudio
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/stereo_mixing_panner_node.cc b/src/3rdparty/resonance-audio/resonance_audio/graph/stereo_mixing_panner_node.cc
new file mode 100644
index 000000000..d919e94c1
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/stereo_mixing_panner_node.cc
@@ -0,0 +1,65 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include "graph/stereo_mixing_panner_node.h"
+
+#include "base/constants_and_types.h"
+#include "base/logging.h"
+#include "base/spherical_angle.h"
+
+#include "dsp/stereo_panner.h"
+
+namespace vraudio {
+
+StereoMixingPannerNode::StereoMixingPannerNode(
+ const SystemSettings& system_settings)
+ : system_settings_(system_settings),
+ gain_mixer_(kNumStereoChannels, system_settings_.GetFramesPerBuffer()),
+ coefficients_(kNumStereoChannels) {}
+
+const AudioBuffer* StereoMixingPannerNode::AudioProcess(
+ const NodeInput& input) {
+ const WorldPosition& listener_position = system_settings_.GetHeadPosition();
+ const WorldRotation& listener_rotation = system_settings_.GetHeadRotation();
+
+ gain_mixer_.Reset();
+ for (auto& input_buffer : input.GetInputBuffers()) {
+ const int source_id = input_buffer->source_id();
+ const auto source_parameters =
+ system_settings_.GetSourceParameters(source_id);
+ DCHECK_NE(source_id, kInvalidSourceId);
+ DCHECK_EQ(input_buffer->num_channels(), 1U);
+
+ // Compute the relative source direction in spherical angles.
+ const ObjectTransform& source_transform =
+ source_parameters->object_transform;
+ WorldPosition relative_direction;
+ GetRelativeDirection(listener_position, listener_rotation,
+ source_transform.position, &relative_direction);
+ const SphericalAngle source_direction =
+ SphericalAngle::FromWorldPosition(relative_direction);
+
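+ // Convert the relative source direction into per-channel stereo pan gains
+ // before this source is mixed into the stereo output.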
+ CalculateStereoPanGains(source_direction, &coefficients_);
+
+ gain_mixer_.AddInputChannel((*input_buffer)[0], source_id, coefficients_);
+ }
+ return gain_mixer_.GetOutput();
+}
+
+} // namespace vraudio
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/stereo_mixing_panner_node.h b/src/3rdparty/resonance-audio/resonance_audio/graph/stereo_mixing_panner_node.h
new file mode 100644
index 000000000..b465917bd
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/stereo_mixing_panner_node.h
@@ -0,0 +1,61 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#ifndef RESONANCE_AUDIO_GRAPH_STEREO_MIXING_PANNER_NODE_H_
+#define RESONANCE_AUDIO_GRAPH_STEREO_MIXING_PANNER_NODE_H_
+
+#include <vector>
+
+#include "base/audio_buffer.h"
+#include "dsp/gain_mixer.h"
+#include "graph/system_settings.h"
+#include "node/processing_node.h"
+
+namespace vraudio {
+
+// Node that accepts mono sound object buffers as input and pans them into a
+// stereo panorama.
+class StereoMixingPannerNode : public ProcessingNode {
+ public:
+ // Initializes StereoMixingPannerNode class.
+ //
+ // @param system_settings Global system configuration.
+ explicit StereoMixingPannerNode(const SystemSettings& system_settings);
+
+ // Node implementation.
+ bool CleanUp() final {
+ CallCleanUpOnInputNodes();
+ // Prevent node from being disconnected when all sources are removed.
+ return false;
+ }
+
+ protected:
+ // Implements ProcessingNode.
+ const AudioBuffer* AudioProcess(const NodeInput& input) override;
+
+ private:
+ const SystemSettings& system_settings_;
+
+ // |GainMixer| instance.
+ GainMixer gain_mixer_;
+
+ // Panning coefficients to be applied to the input.
+ std::vector<float> coefficients_;
+};
+
+} // namespace vraudio
+
+#endif // RESONANCE_AUDIO_GRAPH_STEREO_MIXING_PANNER_NODE_H_
diff --git a/src/3rdparty/resonance-audio/resonance_audio/graph/system_settings.h b/src/3rdparty/resonance-audio/resonance_audio/graph/system_settings.h
new file mode 100644
index 000000000..48573be54
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/graph/system_settings.h
@@ -0,0 +1,189 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS-IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#ifndef RESONANCE_AUDIO_GRAPH_SYSTEM_SETTINGS_H_
+#define RESONANCE_AUDIO_GRAPH_SYSTEM_SETTINGS_H_
+
+#include "api/resonance_audio_api.h"
+#include "base/constants_and_types.h"
+#include "base/misc_math.h"
+#include "graph/source_parameters_manager.h"
+
+namespace vraudio {
+
+// Contains system-wide settings and parameters. Note that this class is not
+// thread-safe. Updating system parameters must be avoided during the audio
+// graph processing.
+class SystemSettings {
+ public:
+ // Constructor initializes the system configuration.
+ //
+ // @param num_output_channels Number of output channels.
+ // @param frames_per_buffer Buffer size in frames.
+ // @param sample_rate_hz Sample rate.
+ SystemSettings(size_t num_output_channels, size_t frames_per_buffer,
+ int sample_rate_hz)
+ : sample_rate_hz_(sample_rate_hz),
+ frames_per_buffer_(frames_per_buffer),
+ num_channels_(num_output_channels),
+ head_rotation_(WorldRotation::Identity()),
+ head_position_(WorldPosition::Zero()),
+ master_gain_(1.0f),
+ stereo_speaker_mode_(false) {}
+
+ // Sets the listener head orientation.
+ //
+ // @param head_rotation Listener head orientation.
+ void SetHeadRotation(const WorldRotation& head_rotation) {
+ head_rotation_ = head_rotation;
+ }
+
+ // Sets the listener head position.
+ //
+ // @param head_position Listener head position.
+ void SetHeadPosition(const WorldPosition& head_position) {
+ head_position_ = head_position;
+ }
+
+ // Sets the global stereo speaker mode flag. This flag enforces stereo panning
+ // and disables HRTF-based binauralization. The stereo speaker mode is
+ // disabled by default.
+ //
+ // @param enabled Defines the stereo speaker mode state.
+ void SetStereoSpeakerMode(bool enabled) { stereo_speaker_mode_ = enabled; }
+
+ // Returns the source parameters manager.
+ //
+ // @return Mutable source parameters manager.
+ SourceParametersManager* GetSourceParametersManager() {
+ return &source_parameters_manager_;
+ }
+
+ // Returns the parameters of source with given |source_id|.
+ //
+ // @param source_id Source id.
+ // @return Pointer to source parameters, nullptr if |source_id| not found.
+ const SourceParameters* GetSourceParameters(SourceId source_id) const {
+ return source_parameters_manager_.GetParameters(source_id);
+ }
+
+ // Returns the sample rate.
+ //
+ // @return Sample rate in Hertz.
+ int GetSampleRateHz() const { return sample_rate_hz_; }
+
+ // Returns the frames per buffer.
+ //
+ // @return Buffer size in frames.
+ size_t GetFramesPerBuffer() const { return frames_per_buffer_; }
+
+ // Returns the number of output channels.
+ //
+ // @return Number of output channels.
+ size_t GetNumChannels() const { return num_channels_; }
+
+ // Returns the head rotation.
+ //
+ // @return Head orientation.
+ const WorldRotation& GetHeadRotation() const { return head_rotation_; }
+
+ // Returns the head position.
+ //
+ // @return Head position.
+ const WorldPosition& GetHeadPosition() const { return head_position_; }
+
+ // Returns the stereo speaker mode state.
+ //
+ // @return Current stereo speaker mode state.
+ bool IsStereoSpeakerModeEnabled() const { return stereo_speaker_mode_; }
+
+ // Sets the master gain.
+ //
+ // @param master_gain Master output gain.
+ void SetMasterGain(float master_gain) { master_gain_ = master_gain; }
+
+ // Sets current reflection properties.
+ //
+ // @param reflection_properties Reflection properties.
+ void SetReflectionProperties(
+ const ReflectionProperties& reflection_properties) {
+ reflection_properties_ = reflection_properties;
+ }
+
+ // Sets current reverb properties.
+ //
+ // @param reverb_properties Reverb properties.
+ void SetReverbProperties(const ReverbProperties& reverb_properties) {
+ reverb_properties_ = reverb_properties;
+ }
+
+ // Returns the master gain.
+ //
+ // @return Master output gain.
+ float GetMasterGain() const { return master_gain_; }
+
+ // Returns the current reflection properties of the environment.
+ //
+ // @return Current reflection properties.
+ const ReflectionProperties& GetReflectionProperties() const {
+ return reflection_properties_;
+ }
+
+ // Returns the current reverb properties of the environment.
+ //
+ // @return Current reverb properties.
+ const ReverbProperties& GetReverbProperties() const {
+ return reverb_properties_;
+ }
+
+ // Disable copy and assignment operators. Since |SystemSettings| serves as
+ // global parameter storage, it should never be copied.
+ SystemSettings& operator=(const SystemSettings&) = delete;
+ SystemSettings(const SystemSettings&) = delete;
+
+ private:
+ // Sampling rate.
+ const int sample_rate_hz_;
+
+ // Frames per buffer.
+ const size_t frames_per_buffer_;
+
+ // Number of channels per buffer.
+ const size_t num_channels_;
+
+ // The most recently updated head rotation and position.
+ WorldRotation head_rotation_;
+ WorldPosition head_position_;
+
+ // Source parameters manager.
+ SourceParametersManager source_parameters_manager_;
+
+ // Master gain in amplitude.
+ float master_gain_;
+
+ // Current reflection properties of the environment.
+ ReflectionProperties reflection_properties_;
+
+ // Current reverb properties of the environment.
+ ReverbProperties reverb_properties_;
+
+ // Defines the state of the global speaker mode.
+ bool stereo_speaker_mode_;
+};
+
+} // namespace vraudio
+
+#endif // RESONANCE_AUDIO_GRAPH_SYSTEM_SETTINGS_H_