diff options
Diffstat (limited to 'src/3rdparty/resonance-audio/resonance_audio/base')
24 files changed, 5035 insertions, 0 deletions
diff --git a/src/3rdparty/resonance-audio/resonance_audio/base/aligned_allocator.h b/src/3rdparty/resonance-audio/resonance_audio/base/aligned_allocator.h new file mode 100644 index 000000000..628ccaa02 --- /dev/null +++ b/src/3rdparty/resonance-audio/resonance_audio/base/aligned_allocator.h @@ -0,0 +1,117 @@ +/* +Copyright 2018 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS-IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +#ifndef RESONANCE_AUDIO_BASE_ALIGNED_ALLOCATOR_H_ +#define RESONANCE_AUDIO_BASE_ALIGNED_ALLOCATOR_H_ + +#include <stdlib.h> + +#include <algorithm> +#include <cstddef> +#include <memory> +#include <type_traits> + +#include "base/simd_utils.h" + + +namespace vraudio { + +// Performs static assert checks on the types size and alignment parameters. +template <size_t TypeSize, size_t Alignment> +void StaticAlignmentCheck() { + const bool alignment_is_power_of_two = + !(Alignment == 0) && !(Alignment & (Alignment - 1)); + static_assert(alignment_is_power_of_two, "Alignment must be power of two"); + + const bool type_size_is_power_of_two = !(TypeSize & (TypeSize - 1)); + static_assert(type_size_is_power_of_two, "Type size must be power of two"); +} + +// Returns a pointer to aligned memory. 
+template <typename Type, typename SizeType, typename PointerType> +PointerType AllignedMalloc(SizeType size, SizeType alignment) { + const SizeType data_size = size * sizeof(Type); + const SizeType offset = alignment - 1 + sizeof(PointerType); + void* mem_block_begin = malloc(data_size + offset); + if (mem_block_begin == nullptr) { + return nullptr; + } + // Find memory aligned address. + void** mem_block_aligned = reinterpret_cast<void**>( + ((reinterpret_cast<SizeType>(mem_block_begin) + offset) & + (~(alignment - 1)))); + // Save pointer to original block right before the aligned block. + mem_block_aligned[-1] = mem_block_begin; + return reinterpret_cast<PointerType>(mem_block_aligned); +} + +// Frees memory that has been aligned with |AllignedMalloc|. +template <typename PointerType> +void AllignedFree(PointerType mem_block_aligned) { + free(*(reinterpret_cast<void**>(mem_block_aligned) - 1)); +} + +// Class that allocates aligned memory. It is derived from std::allocator class +// to be used with STL containers. +// +// @tparam Type Datatype of container to allocate. +// @tparam Alignment Size of memory alignment. +template <typename Type, size_t Alignment> +class AlignedAllocator : public std::allocator<Type> { + public: + using Pointer = typename std::allocator_traits<std::allocator<Type>>::pointer; + using ConstPointer = typename std::allocator_traits<std::allocator<Type>>::const_pointer; + using SizeType = typename std::allocator_traits<std::allocator<Type>>::size_type; + + AlignedAllocator() { StaticAlignmentCheck<sizeof(Type), Alignment>(); } + + // Allocates memory for |size| elements and returns a pointer that is aligned + // to a multiple to |Alignment|. + // + // @param size Number of elements to allocate. + // @return Returns memory aligned pointer. + Pointer allocate(SizeType size) { return allocate(size, nullptr); } + + // Allocates memory for |size| elements and returns a pointer that is aligned + // to a multiple to |Alignment|. 
+ // + // @param size Number of elements to allocate. + // @return Returns memory aligned pointer. + Pointer allocate(SizeType size, ConstPointer /* hint */) { + + return AllignedMalloc<Type, SizeType, Pointer>(size, Alignment); + } + + void deallocate(Pointer mem_block_aligned, size_t size) { + AllignedFree<Pointer>(mem_block_aligned); + } + + // Copy constructor to support rebind operation (to make MSVC happy). + template <typename U> + explicit AlignedAllocator<Type, Alignment>( + const AlignedAllocator<U, Alignment>& other) {} + + // Rebind is used to allocate container internal variables of type |U| + // (which don't need to be aligned). + template <typename U> + struct rebind { + typedef AlignedAllocator<U, Alignment> other; + }; +}; + +} // namespace vraudio + +#endif // RESONANCE_AUDIO_BASE_ALIGNED_ALLOCATOR_H_ diff --git a/src/3rdparty/resonance-audio/resonance_audio/base/aligned_allocator_test.cc b/src/3rdparty/resonance-audio/resonance_audio/base/aligned_allocator_test.cc new file mode 100644 index 000000000..c38b8a2f5 --- /dev/null +++ b/src/3rdparty/resonance-audio/resonance_audio/base/aligned_allocator_test.cc @@ -0,0 +1,53 @@ +/* +Copyright 2018 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS-IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +#include "base/aligned_allocator.h" + +#include <cstddef> +#include <vector> + +#include "base/integral_types.h" +#include "third_party/googletest/googletest/include/gtest/gtest.h" +#include "base/simd_utils.h" + +using vraudio::AlignedAllocator; + +namespace { + +// Helper method to test memory alignment. +template <size_t Alignment> +void TestAlignedAllocator() { + static const size_t kRuns = 1000; + for (size_t run = 0; run < kRuns; ++run) { + std::vector<float, AlignedAllocator<float, Alignment> > aligned_vector(1); + const bool is_aligned = + ((reinterpret_cast<size_t>(&aligned_vector[0]) & (Alignment - 1)) == 0); + EXPECT_TRUE(is_aligned); + } +} + +} // namespace + +// Allocates multiple std::vectors using the AlignedAllocator and tests if the +// allocated memory is aligned. +TEST(AlignedAlocatorTest, TestAlignment) { + TestAlignedAllocator<2>(); + TestAlignedAllocator<4>(); + TestAlignedAllocator<16>(); + TestAlignedAllocator<32>(); + TestAlignedAllocator<64>(); +} + diff --git a/src/3rdparty/resonance-audio/resonance_audio/base/audio_buffer.cc b/src/3rdparty/resonance-audio/resonance_audio/base/audio_buffer.cc new file mode 100644 index 000000000..c2ad61510 --- /dev/null +++ b/src/3rdparty/resonance-audio/resonance_audio/base/audio_buffer.cc @@ -0,0 +1,74 @@ +/* +Copyright 2018 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS-IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +#include "base/audio_buffer.h" + +namespace vraudio { + +AudioBuffer::AudioBuffer() : num_frames_(0), source_id_(kInvalidSourceId) {} + +AudioBuffer::AudioBuffer(size_t num_channels, size_t num_frames) + : num_frames_(num_frames), source_id_(kInvalidSourceId) { + + InitChannelViews(num_channels); +} + +// Copy assignment from AudioBuffer. +AudioBuffer& AudioBuffer::operator=(const AudioBuffer& other) { + if (this != &other) { + num_frames_ = other.num_frames_; + source_id_ = other.source_id_; + InitChannelViews(other.num_channels()); + for (size_t i = 0; i < num_channels(); ++i) { + channel_views_[i] = other.channel_views_[i]; + } + } + return *this; +} + +AudioBuffer::AudioBuffer(AudioBuffer&& other) { + num_frames_ = other.num_frames_; + other.num_frames_ = 0; + data_ = std::move(other.data_); + data_size_ = other.data_size_; + other.data_size_ = 0; + channel_views_ = std::move(other.channel_views_); + source_id_ = other.source_id_; + other.source_id_ = kInvalidSourceId; +} + +void AudioBuffer::InitChannelViews(size_t num_channels) { + + + const size_t num_frames_to_next_channel = FindNextAlignedArrayIndex( + num_frames_, sizeof(float), kMemoryAlignmentBytes); + + data_size_ = num_channels * num_frames_to_next_channel; + data_.resize(data_size_); + + channel_views_.clear(); + channel_views_.reserve(num_channels); + + float* itr = data_.data(); + + for (size_t i = 0; i < num_channels; ++i) { + ChannelView new_channel_view(itr, num_frames_); + channel_views_.push_back(new_channel_view); + itr += num_frames_to_next_channel; + } +} + +} // namespace vraudio diff --git a/src/3rdparty/resonance-audio/resonance_audio/base/audio_buffer.h b/src/3rdparty/resonance-audio/resonance_audio/base/audio_buffer.h new file mode 100644 index 000000000..c63fd905c --- /dev/null +++ b/src/3rdparty/resonance-audio/resonance_audio/base/audio_buffer.h @@ -0,0 +1,222 @@ +/* +Copyright 2018 Google Inc. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS-IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +#ifndef RESONANCE_AUDIO_BASE_AUDIO_BUFFER_H_ +#define RESONANCE_AUDIO_BASE_AUDIO_BUFFER_H_ + +#include <algorithm> +#include <memory> + +#include "base/integral_types.h" +#include "base/aligned_allocator.h" +#include "base/channel_view.h" +#include "base/constants_and_types.h" +#include "base/logging.h" +#include "base/simd_utils.h" + + +namespace vraudio { + +// Audio buffer that manages multi-channel audio data in a planar data format. +// All channels are sequentially stored within a single consecutive chunk of +// memory. To access individual channel data, the array subscript operator can +// be used to obtain a |AudioBuffer::Channel|. Note that the user must guarantee +// that the AudioBuffer instance lives as long as its channel data is accessed +// via |AudioBuffer::Channel|s. Note that allocated buffers may *not* be +// initialized to zero. +// +// Examples: +// +// // Range-based for-loop over all channels and all samples. +// AudioBuffer audio_buffer(...) +// for (AudioBuffer::Channel& channel : audio_buffer) { +// for (float& sample : channel) { +// sample *= gain; +// } +// } +// +// // Range-based for-loop over all channels and array subscripts-based for-loop +// // to access samples. +// AudioBuffer audio_buffer(...) 
+// for (AudioBuffer::Channel& channel : audio_buffer) { +// for (size_t i = 0; i < channel.num_frames(); ++i) { +// channel[i] *= gain; +// } +// } +// +// // Array subscript-based for-loops over all channels samples. +// // AudioBuffer audio_buffer(...) +// for (size_t c=0; c < audio_buffer.num_channels(); ++c) { +// // First obtain a reference to AudioBuffer::Channel. +// AudioBuffer::Channel& channel = audio_buffer[c]; +// for (size_t i = 0; i < channel.num_frames(); ++i) { +// channel[i] *= gain; +// } +// } +// +// Note do *NOT* use double array subscripts to iterate over multiple samples +// since it performs a channel iterator lookup for every sample: +// for (size_t c=0; c < audio_buffer.num_channels(); ++c) { +// for (size_t i = 0; i < channel.size(); ++i) { +// audio_buffer[c][i] *= gain; // *BAD* +// } +// } +// +class AudioBuffer { + public: + // View on separate audio channel. + typedef ChannelView Channel; + + // Allocator class to allocate aligned floats. + typedef AlignedAllocator<float, kMemoryAlignmentBytes> FloatAllocator; + + // Allocator class to allocate aligned int16s. + typedef AlignedAllocator<int16, kMemoryAlignmentBytes> Int16Allocator; + + // AlignedFloatBuffer for storing audio data. + typedef std::vector<float, FloatAllocator> AlignedFloatVector; + + // AlignedInt16Buffer for storing audio data. + typedef std::vector<int16, Int16Allocator> AlignedInt16Vector; + + // Default constructor initializes an empty |AudioBuffer|. + AudioBuffer(); + + // Constructor. + // + // @param num_channels Number of channels. + // @param num_frames Number of frames. + AudioBuffer(size_t num_channels, size_t num_frames); + + // Move constructor. + AudioBuffer(AudioBuffer&& other); + + // Copy constructor is explicitly deleted to prevent accidental copies. + // Use copy assignment operator instead. + AudioBuffer(const AudioBuffer& other) = delete; + + // Copy assignment from AudioBuffer. 
+ AudioBuffer& operator=(const AudioBuffer& other); + + // Returns the number of audio channels. + size_t num_channels() const { return channel_views_.size(); } + + // Returns the number of frames per buffer. + size_t num_frames() const { return num_frames_; } + + // Returns this buffer's source id. + SourceId source_id() const { return source_id_; } + + // Returns a reference to the selected ChannelView. + Channel& operator[](size_t channel) { + DCHECK_LT(channel, channel_views_.size()); + return channel_views_[channel]; + } + + // Returns a const reference to the selected ChannelView. + const Channel& operator[](size_t channel) const { + DCHECK_LT(channel, channel_views_.size()); + return channel_views_[channel]; + } + + // Copy assignment from std::vector<std::vector<float>>. + AudioBuffer& operator=(const std::vector<std::vector<float>>& other) { + DCHECK_EQ(other.size(), channel_views_.size()); + for (size_t channel = 0; channel < channel_views_.size(); ++channel) { + channel_views_[channel] = other[channel]; + } + return *this; + } + + // += operator + AudioBuffer& operator+=(const AudioBuffer& other) { + DCHECK_EQ(other.num_channels(), num_channels()); + DCHECK_EQ(other.num_frames(), num_frames()); + for (size_t i = 0; i < channel_views_.size(); ++i) + channel_views_[i] += other[i]; + + return *this; + } + + // -= operator + AudioBuffer& operator-=(const AudioBuffer& other) { + DCHECK_EQ(other.num_channels(), num_channels()); + DCHECK_EQ(other.num_frames(), num_frames()); + for (size_t i = 0; i < channel_views_.size(); ++i) + channel_views_[i] -= other[i]; + + return *this; + } + + // Returns an iterator to the ChannelView of the first channel. + std::vector<Channel>::iterator begin() { return channel_views_.begin(); } + + // Returns an iterator to the end of the ChannelView vector. + std::vector<Channel>::iterator end() { return channel_views_.end(); } + + // Returns a const_iterator to the ChannelView of the first channel. 
+ std::vector<Channel>::const_iterator begin() const { + return channel_views_.begin(); + } + + // Returns an const_iterator to the end of the ChannelView vector. + std::vector<Channel>::const_iterator end() const { + return channel_views_.end(); + } + + // Fills all channels with zeros and reenables |Channel|s. + void Clear() { + for (Channel& channel : channel_views_) { + channel.SetEnabled(true); + channel.Clear(); + } + } + + // Returns the number of allocated frames per |Channel|. Note this may + // differ from the actual size of the |Channel| to ensure alignment of all + // |Channel|s. + size_t GetChannelStride() const { + return FindNextAlignedArrayIndex(num_frames_, sizeof(float), + kMemoryAlignmentBytes); + } + + // Sets the source id of which the buffer belongs to. + void set_source_id(SourceId source_id) { source_id_ = source_id; } + + private: + // Allocates memory and initializes vector of |ChannelView|s. + void InitChannelViews(size_t num_channels); + + // Number of frames per buffer. + size_t num_frames_; + + // Audio buffer that sequentially stores multiple audio channels in a planar + // format. + AlignedFloatVector data_; + + // Size of audio buffer. + size_t data_size_; + + // Vector of |AudioBuffer::Channel|s. + std::vector<Channel> channel_views_; + + // Id of a source that this buffer belongs to. + SourceId source_id_; +}; + +} // namespace vraudio + +#endif // RESONANCE_AUDIO_BASE_AUDIO_BUFFER_H_ diff --git a/src/3rdparty/resonance-audio/resonance_audio/base/audio_buffer_test.cc b/src/3rdparty/resonance-audio/resonance_audio/base/audio_buffer_test.cc new file mode 100644 index 000000000..2c8f3e20b --- /dev/null +++ b/src/3rdparty/resonance-audio/resonance_audio/base/audio_buffer_test.cc @@ -0,0 +1,144 @@ +/* +Copyright 2018 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS-IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +#include "base/audio_buffer.h" + +#include "third_party/googletest/googletest/include/gtest/gtest.h" +#include "base/constants_and_types.h" + +namespace vraudio { + +namespace { + +// Tests default constructor. +TEST(AudioBuffer, AudioBufferDefaultConstructor) { + AudioBuffer audio_buffer; + EXPECT_EQ(audio_buffer.num_channels(), 0U); + EXPECT_EQ(audio_buffer.num_frames(), 0U); +} + +// Tests initialization of |AudioBuffer|. +TEST(AudioBuffer, AudioBufferInitializationTest) { + static const size_t kNumChannels = 2; + static const size_t kFramesPerBuffer = 16; + AudioBuffer audio_buffer(kNumChannels, kFramesPerBuffer); + + EXPECT_EQ(audio_buffer.num_channels(), kNumChannels); + EXPECT_EQ(audio_buffer.num_frames(), kFramesPerBuffer); + EXPECT_EQ(static_cast<size_t>(audio_buffer.end() - audio_buffer.begin()), + kNumChannels); + + // Test range-based for-loop. + size_t channel_idx = 0; + for (const AudioBuffer::Channel& channel : audio_buffer) { + EXPECT_EQ(channel.begin(), audio_buffer[channel_idx].begin()); + EXPECT_EQ(channel.end(), audio_buffer[channel_idx].end()); + ++channel_idx; + } +} + +// Tests assignment operator from std::vector<std::vector<float>>. 
+TEST(AudioBuffer, AudioBufferAssignmentOperator) { + const std::vector<std::vector<float>> kTestVector = { + {0.0f, 1.0f, 2.0f}, {3.0f, 4.0f, 5.0f}, {6.0f, 7.0f, 8.0f}}; + + AudioBuffer audio_buffer(kTestVector.size(), kTestVector[0].size()); + audio_buffer = kTestVector; + + for (size_t channel = 0; channel < kTestVector.size(); ++channel) { + for (size_t frame = 0; frame < kTestVector[0].size(); ++frame) { + EXPECT_EQ(audio_buffer[channel][frame], kTestVector[channel][frame]); + } + } +} + +// Tests move constructor. +TEST(AudioBuffer, AudioBufferMoveConstructor) { + const std::vector<std::vector<float>> kTestVector = { + {0.0f, 1.0f, 2.0f}, {3.0f, 4.0f, 5.0f}, {6.0f, 7.0f, 8.0f}}; + + AudioBuffer audio_buffer(kTestVector.size(), kTestVector[0].size()); + audio_buffer = kTestVector; + const size_t num_channels = audio_buffer.num_channels(); + const size_t num_frames = audio_buffer.num_frames(); + + AudioBuffer moved_audio_buffer(std::move(audio_buffer)); + EXPECT_EQ(audio_buffer.num_channels(), 0U); + EXPECT_EQ(audio_buffer.num_frames(), 0U); + EXPECT_EQ(moved_audio_buffer.num_channels(), num_channels); + EXPECT_EQ(moved_audio_buffer.num_frames(), num_frames); + + for (size_t channel = 0; channel < kTestVector.size(); ++channel) { + for (size_t frame = 0; frame < kTestVector[0].size(); ++frame) { + EXPECT_EQ(moved_audio_buffer[channel][frame], + kTestVector[channel][frame]); + } + } +} + +// Tests memory alignment of each channel buffer. The address if the first +// element of each channel should be memory aligned. 
+TEST(AudioBuffer, TestBufferAlignment) { + static const size_t kNumRuns = 100; + static const size_t kNumChannels = 16; + + for (size_t run = 0; run < kNumRuns; ++run) { + const size_t frames_per_buffer = run + 1; + AudioBuffer audio_buffer(kNumChannels, frames_per_buffer); + for (size_t channel = 0; channel < kNumChannels; ++channel) { + const AudioBuffer::Channel& channel_view = audio_buffer[channel]; + const bool is_aligned = + ((reinterpret_cast<size_t>(&(*channel_view.begin())) & + (kMemoryAlignmentBytes - 1)) == 0); + EXPECT_TRUE(is_aligned); + } + } +} + +// Tests Clear method. +TEST(AudioBuffer, AudioBufferClear) { + const std::vector<std::vector<float>> kTestVector = { + {0.0f, 1.0f, 2.0f}, {3.0f, 4.0f, 5.0f}, {6.0f, 7.0f, 8.0f}}; + + AudioBuffer audio_buffer(kTestVector.size(), kTestVector[0].size()); + audio_buffer = kTestVector; + + audio_buffer.Clear(); + + for (size_t channel = 0; channel < kTestVector.size(); ++channel) { + for (size_t frame = 0; frame < kTestVector[0].size(); ++frame) { + EXPECT_EQ(0.0f, audio_buffer[channel][frame]); + } + } +} + +// Tests GetChannelStride method. +TEST(AudioBuffer, GetChannelStride) { + const size_t num_frames_per_alignment = kMemoryAlignmentBytes / sizeof(float); + for (size_t num_frames = 1; num_frames < num_frames_per_alignment * 5; + ++num_frames) { + AudioBuffer buffer(1, num_frames); + // Fast way to ceil(frame/num_frames_per_alignment). 
+ const size_t expected_num_alignment_blocks = + (num_frames + num_frames_per_alignment - 1) / num_frames_per_alignment; + EXPECT_EQ(expected_num_alignment_blocks * num_frames_per_alignment, + buffer.GetChannelStride()); + } +} + +} // namespace + +} // namespace vraudio diff --git a/src/3rdparty/resonance-audio/resonance_audio/base/channel_view.cc b/src/3rdparty/resonance-audio/resonance_audio/base/channel_view.cc new file mode 100644 index 000000000..e3d6fe14a --- /dev/null +++ b/src/3rdparty/resonance-audio/resonance_audio/base/channel_view.cc @@ -0,0 +1,50 @@ +/* +Copyright 2018 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS-IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +#include "base/channel_view.h" + +#include "base/simd_utils.h" + +namespace vraudio { + +ChannelView& ChannelView::operator+=(const ChannelView& other) { + DCHECK_EQ(other.size(), size_); + DCHECK(enabled_); + float* this_sample = begin(); + const float* other_sample = other.begin(); + AddPointwise(size_, other_sample, this_sample, this_sample); + return *this; +} + +ChannelView& ChannelView::operator-=(const ChannelView& other) { + DCHECK_EQ(other.size(), size_); + DCHECK(enabled_); + float* this_sample = begin(); + const float* other_sample = other.begin(); + SubtractPointwise(size_, other_sample, this_sample, this_sample); + return *this; +} + +ChannelView& ChannelView::operator*=(const ChannelView& other) { + DCHECK_EQ(other.size(), size_); + DCHECK(enabled_); + float* this_sample = begin(); + const float* other_sample = other.begin(); + MultiplyPointwise(size_, other_sample, this_sample, this_sample); + return *this; +} + +} // namespace vraudio diff --git a/src/3rdparty/resonance-audio/resonance_audio/base/channel_view.h b/src/3rdparty/resonance-audio/resonance_audio/base/channel_view.h new file mode 100644 index 000000000..557773a99 --- /dev/null +++ b/src/3rdparty/resonance-audio/resonance_audio/base/channel_view.h @@ -0,0 +1,138 @@ +/* +Copyright 2018 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS-IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +#ifndef RESONANCE_AUDIO_BASE_CHANNEL_VIEW_H_ +#define RESONANCE_AUDIO_BASE_CHANNEL_VIEW_H_ + +#include <algorithm> +#include <cstring> +#include <vector> + +#include "base/logging.h" + +namespace vraudio { + +// Provides an interface to a single audio channel in |AudioBuffer|. Note that a +// |ChannelView| instance does not own the data it is initialized with. +class ChannelView { + public: + // Array subscript operator returning a reference. + float& operator[](size_t index) { + DCHECK(enabled_); + DCHECK_LT(index, size_); + return *(begin() + index); + } + + // Const array subscript operator returning a const reference. + const float& operator[](size_t index) const { + DCHECK(enabled_); + DCHECK_LT(index, size_); + return *(begin() + index); + } + + // Returns the size of the channel in samples. + size_t size() const { return size_; } + + // Returns a float pointer to the begin of the channel data. + float* begin() { + DCHECK(enabled_); + return begin_itr_; + } + + // Returns a float pointer to the end of the channel data. + float* end() { + DCHECK(enabled_); + return begin_itr_ + size_; + } + + // Returns a const float pointer to the begin of the channel data. + const float* begin() const { + DCHECK(enabled_); + return begin_itr_; + } + + // Returns a const float pointer to the end of the channel data. + const float* end() const { + DCHECK(enabled_); + return begin_itr_ + size_; + } + + // Copy assignment from float vector. + ChannelView& operator=(const std::vector<float>& other) { + DCHECK(enabled_); + DCHECK_EQ(other.size(), size_); + memcpy(begin(), other.data(), sizeof(float) * size_); + return *this; + } + + // Copy assignment from ChannelView. + ChannelView& operator=(const ChannelView& other) { + if (this != &other) { + DCHECK(enabled_); + DCHECK_EQ(other.size(), size_); + memcpy(begin(), other.begin(), sizeof(float) * size_); + } + return *this; + } + + // Adds a |ChannelView| to this |ChannelView|. 
+ ChannelView& operator+=(const ChannelView& other); + + // Subtracts a |ChannelView| from this |ChannelView|. + ChannelView& operator-=(const ChannelView& other); + + // Pointwise multiplies a |ChannelView| with this |Channelview|. + ChannelView& operator*=(const ChannelView& other); + + // Fills channel buffer with zeros. + void Clear() { + DCHECK(enabled_); + memset(begin(), 0, sizeof(float) * size_); + } + + // Allows for disabling the channel to prevent access to the channel data and + // channel iterators. It is used in the |Mixer| class to prevent the copies of + // silence |ChannelView|s. Note that |ChannelView| are enabled by default. + // + // @param enabled True to enable the channel. + void SetEnabled(bool enabled) { enabled_ = enabled; } + + // Returns true if |ChannelView| is enabled. + // + // @return State of |enabled_| flag. + bool IsEnabled() const { return enabled_; } + + private: + friend class AudioBuffer; + + // Constructor is initialized with a float pointer to the first sample and the + // size of chunk of planar channel data. + ChannelView(float* begin_itr, size_t size) + : begin_itr_(begin_itr), size_(size), enabled_(true) {} + + // Iterator of first and last element in channel. + float* const begin_itr_; + + // Channel size. + const size_t size_; + + // Flag indicating if the channel is enabled. + bool enabled_; +}; + +} // namespace vraudio + +#endif // RESONANCE_AUDIO_BASE_CHANNEL_VIEW_H_ diff --git a/src/3rdparty/resonance-audio/resonance_audio/base/channel_view_test.cc b/src/3rdparty/resonance-audio/resonance_audio/base/channel_view_test.cc new file mode 100644 index 000000000..24b04dfd9 --- /dev/null +++ b/src/3rdparty/resonance-audio/resonance_audio/base/channel_view_test.cc @@ -0,0 +1,157 @@ +/* +Copyright 2018 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS-IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +#include "base/channel_view.h" + +#include <vector> + +#include "third_party/googletest/googletest/include/gtest/gtest.h" +#include "base/audio_buffer.h" + +namespace vraudio { + +namespace { + +const float kTestData[] = {0.0f, 1.0f, 2.0f}; +const size_t kTestDataSize = sizeof(kTestData) / sizeof(float); + +typedef std::vector<float> Buffer; + +// Tests initialization of |ChannelView| class. +TEST(ChannelView, InitializationTest) { + AudioBuffer test_buffer(1, kTestDataSize); + ChannelView& test_buffer_view = test_buffer[0]; + for (size_t i = 0; i < kTestDataSize; ++i) { + test_buffer_view[i] = kTestData[i]; + } + + EXPECT_EQ(test_buffer.num_frames(), test_buffer_view.size()); + EXPECT_EQ(&test_buffer[0][0], test_buffer_view.begin()); + EXPECT_EQ(&test_buffer[0][0] + test_buffer.num_frames(), + test_buffer_view.end()); +} + +// Tests iterators and array subscript of |ChannelView|. +TEST(ChannelView, IteratorAndArraySubscriptTest) { + AudioBuffer test_buffer(1, kTestDataSize); + ChannelView& test_buffer_view = test_buffer[0]; + for (size_t i = 0; i < kTestDataSize; ++i) { + test_buffer_view[i] = kTestData[i]; + } + + for (size_t i = 0; i < test_buffer_view.size(); i++) { + EXPECT_EQ(test_buffer[0][i], test_buffer_view[i]); + EXPECT_EQ(kTestData[i], test_buffer_view[i]); + } + + // Test range-based for-loops. + for (float& sample : test_buffer_view) { + sample *= 2.0f; + } + size_t idx = 0; + for (const float& sample : test_buffer_view) { + EXPECT_EQ(kTestData[idx] * 2.0f, sample); + ++idx; + } +} + +// Tests copy-assignment operators. 
+TEST(ChannelView, CopyAssignmentTest) { + AudioBuffer test_buffer(1, kTestDataSize); + ChannelView& test_buffer_view = test_buffer[0]; + for (size_t i = 0; i < kTestDataSize; ++i) { + test_buffer_view[i] = kTestData[i]; + } + + AudioBuffer target_buffer(1, kTestDataSize); + ChannelView& target_vector_view = target_buffer[0]; + for (size_t i = 0; i < kTestDataSize; ++i) { + target_vector_view[i] = -1.0f; + } + + // Copy assignment from ChannelView. + target_vector_view = test_buffer_view; + + for (size_t i = 0; i < test_buffer_view.size(); i++) { + EXPECT_EQ(test_buffer_view[i], target_vector_view[i]); + } + + AudioBuffer target2_buffer(1, kTestDataSize); + ChannelView& target2_vector_view = target2_buffer[0]; + for (size_t i = 0; i < kTestDataSize; ++i) { + target2_vector_view[i] = -1.0f; + } + + // Copy assignment from AudioBuffer channel. + target2_vector_view = test_buffer[0]; + + for (size_t i = 0; i < test_buffer_view.size(); i++) { + EXPECT_EQ(test_buffer[0][i], target2_vector_view[i]); + } +} + +// Tests addition-assignment operator. +TEST(ChannelView, AdditionOperatorTest) { + // Here an AudioBuffer is used to ensure that the data is aligned. + AudioBuffer test_buffer(1, kTestDataSize); + ChannelView& test_buffer_view = test_buffer[0]; + for (size_t i = 0; i < kTestDataSize; ++i) { + test_buffer_view[i] = kTestData[i]; + } + + // Execute |ChannelView|s addition operator. + test_buffer_view += test_buffer_view; + + for (size_t i = 0; i < test_buffer_view.size(); i++) { + EXPECT_EQ(kTestData[i] * 2.0f, test_buffer_view[i]); + } +} + +// Tests subtraction-assignment operator. +TEST(ChannelView, SubtractionOperatorTest) { + // Here an AudioBuffer is used to ensure that the data is aligned. + AudioBuffer test_buffer(1, kTestDataSize); + ChannelView& test_buffer_view = test_buffer[0]; + for (size_t i = 0; i < kTestDataSize; ++i) { + test_buffer_view[i] = kTestData[i]; + } + + // Execute |ChannelView|s subtraction operator. 
+ test_buffer_view -= test_buffer_view; + + for (size_t i = 0; i < test_buffer_view.size(); i++) { + EXPECT_EQ(0.0f, test_buffer_view[i]); + } +} + +// Tests Clear method. +TEST(ChannelView, ClearTest) { + AudioBuffer test_buffer(1, kTestDataSize); + ChannelView& test_buffer_view = test_buffer[0]; + for (size_t i = 0; i < kTestDataSize; ++i) { + test_buffer_view[i] = kTestData[i]; + } + + test_buffer_view.Clear(); + + for (const float& sample : test_buffer_view) { + EXPECT_EQ(0.0f, sample); + } +} + +} // namespace + +} // namespace vraudio diff --git a/src/3rdparty/resonance-audio/resonance_audio/base/constants_and_types.h b/src/3rdparty/resonance-audio/resonance_audio/base/constants_and_types.h new file mode 100644 index 000000000..591c958dd --- /dev/null +++ b/src/3rdparty/resonance-audio/resonance_audio/base/constants_and_types.h @@ -0,0 +1,176 @@ +/* +Copyright 2018 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS-IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +#ifndef RESONANCE_AUDIO_BASE_CONSTANTS_AND_TYPES_H_ +#define RESONANCE_AUDIO_BASE_CONSTANTS_AND_TYPES_H_ + +#include <cmath> +#include <string> // for size_t + +namespace vraudio { + +// Sound object / ambisonic source identifier. + +typedef int SourceId; + +// Invalid source id that can be used to initialize handler variables during +// class construction. +static const SourceId kInvalidSourceId = -1; + + +// Defines memory alignment of audio buffers. 
// Note that not only the first
// element of the |data_| buffer is memory aligned but also the address of the
// first elements of the |ChannelView|s.
const size_t kMemoryAlignmentBytes = 64;

// Maximum Ambisonic order currently supported in vr audio, equivalent to High
// Quality sound object rendering mode. This number is limited by a) number of
// HRIR data points used in the binaural renderer; b) size of the lookup table
// controlling the angular spread of a sound source in the Ambisonic Lookup
// Table class.
static const int kMaxSupportedAmbisonicOrder = 3;

// Maximum allowed size of internal buffers.
const size_t kMaxSupportedNumFrames = 16384;

// Number of mono channels.
static const size_t kNumMonoChannels = 1;

// Number of stereo channels.
static const size_t kNumStereoChannels = 2;

// Number of surround 5.1 channels.
static const size_t kNumSurroundFiveDotOneChannels = 6;

// Number of surround 7.1 channels.
static const size_t kNumSurroundSevenDotOneChannels = 8;

// Number of first-order ambisonic channels.
static const size_t kNumFirstOrderAmbisonicChannels = 4;

// Number of second-order ambisonic channels.
static const size_t kNumSecondOrderAmbisonicChannels = 9;

// Number of third-order ambisonic channels.
static const size_t kNumThirdOrderAmbisonicChannels = 16;

// Number of first-order ambisonic with non-diegetic stereo channels.
static const size_t kNumFirstOrderAmbisonicWithNonDiegeticStereoChannels = 6;

// Number of second-order ambisonic with non-diegetic stereo channels.
static const size_t kNumSecondOrderAmbisonicWithNonDiegeticStereoChannels = 11;

// Number of third-order ambisonic with non-diegetic stereo channels.
static const size_t kNumThirdOrderAmbisonicWithNonDiegeticStereoChannels = 18;

// Negative 60dB in amplitude.
static const float kNegative60dbInAmplitude = 0.001f;

// Tolerated error margins for floating points.
static const double kEpsilonDouble = 1e-6;
static const float kEpsilonFloat = 1e-6f;

// Inverse square root of two (equivalent to -3dB audio signal attenuation).
static const float kInverseSqrtTwo = 1.0f / std::sqrt(2.0f);

// Square roots.
static const float kSqrtTwo = std::sqrt(2.0f);
static const float kSqrtThree = std::sqrt(3.0f);

// Pi in radians.
static const float kPi = static_cast<float>(M_PI);
// Half pi in radians.
static const float kHalfPi = static_cast<float>(M_PI / 2.0);
// Two pi in radians.
static const float kTwoPi = static_cast<float>(2.0 * M_PI);

// Defines conversion factor from degrees to radians.
static const float kRadiansFromDegrees = static_cast<float>(M_PI / 180.0);

// Defines conversion factor from radians to degrees.
static const float kDegreesFromRadians = static_cast<float>(180.0 / M_PI);

// The negated natural logarithm of 1000.
static const float kNegativeLog1000 = -std::log(1000.0f);

// The lowest octave band for computing room effects.
static const float kLowestOctaveBandHz = 31.25f;

// Number of octave bands in which room effects are computed.
static const size_t kNumReverbOctaveBands = 9;

// Centers of possible frequency bands up to 8 kHz.
// ------------------------------------
// Band no.  Low     Center   High    [Frequencies in Hz]
// ------------------------------------
// 0         22      31.25    44.2
// 1         44.2    62.5     88.4
// 2         88.4    125      176.8
// 3         176.8   250      353.6
// 4         353.6   500      707.1
// 5         707.1   1000     1414.2
// 6         1414.2  2000     2828.4
// 7         2828.4  4000     5656.9
// 8         5656.9  8000     11313.7
//--------------------------------------
const float kOctaveBandCentres[kNumReverbOctaveBands] = {
    31.25f, 62.5f, 125.0f, 250.0f, 500.0f, 1000.0f, 2000.0f, 4000.0f, 8000.0f};

// Number of surfaces in a shoe-box room.
static const size_t kNumRoomSurfaces = 6;

// Speed of sound in air at 20 degrees Celsius in meters per second.
// http://www.sengpielaudio.com/calculator-speedsound.htm
static const float kSpeedOfSound = 343.0f;

// Locations of the stereo virtual loudspeakers in degrees.
static const float kStereoLeftDegrees = 90.0f;
static const float kStereoRightDegrees = -90.0f;

// Conversion factor from seconds to milliseconds.
static const float kMillisecondsFromSeconds = 1000.0f;

// Conversion factor from milliseconds to seconds.
static const float kSecondsFromMilliseconds = 0.001f;

// Conversion factor from seconds to microseconds.
static const double kMicrosecondsFromSeconds = 1e6;

// Conversion factor from microseconds to seconds.
static const double kSecondsFromMicroseconds = 1e-6;

// The distance threshold where the near field effect should fade in.
static const float kNearFieldThreshold = 1.0f;

// Minimum allowed distance of a near field sound source used to cap the allowed
// energy boost.
static const float kMinNearFieldDistance = 0.1f;

// Maximum gain applied by Near Field Effect to the mono source signal.
static const float kMaxNearFieldEffectGain = 9.0f;

// Number of samples across which the gain value should be interpolated for
// a unit gain change of 1.0f.

static const size_t kUnitRampLength = 2048;

// Rotation quantization which applies in ambisonic soundfield rotators.

static const float kRotationQuantizationRad = 1.0f * kRadiansFromDegrees;

}  // namespace vraudio

#endif  // RESONANCE_AUDIO_BASE_CONSTANTS_AND_TYPES_H_
diff --git a/src/3rdparty/resonance-audio/resonance_audio/base/integral_types.h b/src/3rdparty/resonance-audio/resonance_audio/base/integral_types.h
new file mode 100644
index 000000000..7a4d9216e
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/base/integral_types.h
/*
Copyright 2018 Google Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS-IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Basic integer type definitions for various platforms
//
// NOTE: This is a legacy Google-style header; new code would use the
// fixed-width types from <cstdint> instead of these typedefs.

#ifndef BASE_INTEGRAL_TYPES_H_
#define BASE_INTEGRAL_TYPES_H_

// Standard typedefs
typedef signed char schar;
typedef signed char int8;
typedef short int16;
typedef int int32;
// MSVC predates `long long`-style 64-bit literals in old modes; use __int64.
#ifdef _MSC_VER
typedef __int64 int64;
#else
typedef long long int64;
#endif /* _MSC_VER */

// NOTE: unsigned types are DANGEROUS in loops and other arithmetical
// places.  Use the signed types unless your variable represents a bit
// pattern (eg a hash value) or you really need the extra bit.  Do NOT
// use 'unsigned' to express "this value should always be positive";
// use assertions for this.

typedef unsigned char uint8;
typedef unsigned short uint16;
typedef unsigned int uint32;
#ifdef _MSC_VER
typedef unsigned __int64 uint64;
#else
typedef unsigned long long uint64;
#endif /* _MSC_VER */

// A type to represent a Unicode code-point value. As of Unicode 4.0,
// such values require up to 21 bits.
// (For type-checking on pointers, make this explicitly signed,
// and it should always be the signed version of whatever int32 is.)
typedef signed int char32;

// A type to represent a natural machine word (for e.g. efficiently
// scanning through memory for checksums or index searching). Don't use
// this for storing normal integers. Ideally this would be just
// unsigned int, but our 64-bit architectures use the LP64 model
// (http://en.wikipedia.org/wiki/64-bit_computing#64-bit_data_models), hence
// their ints are only 32 bits. We want to use the same fundamental
// type on all archs if possible to preserve *printf() compatability.
typedef unsigned long uword_t;

// long long macros to be used because gcc and vc++ use different suffixes,
// and different size specifiers in format strings
#undef GG_LONGLONG
#undef GG_ULONGLONG
#undef GG_LL_FORMAT

#ifdef _MSC_VER /* if Visual C++ */

// VC++ long long suffixes
#define GG_LONGLONG(x) x##I64
#define GG_ULONGLONG(x) x##UI64

// Length modifier in printf format string for int64's (e.g. within %d)
#define GG_LL_FORMAT "I64"  // As in printf("%I64d", ...)
#define GG_LL_FORMAT_W L"I64"

#else /* not Visual C++ */

#define GG_LONGLONG(x) x##LL
#define GG_ULONGLONG(x) x##ULL
#define GG_LL_FORMAT "ll"  // As in "%lld". Note that "q" is poor form also.
#define GG_LL_FORMAT_W L"ll"

#endif  // _MSC_VER

// Numeric limits for the typedefs above. The ~0x7F... forms spell the most
// negative value without writing a literal the compiler could reject.
static const uint8 kuint8max = ((uint8)0xFF);
static const uint16 kuint16max = ((uint16)0xFFFF);
static const uint32 kuint32max = ((uint32)0xFFFFFFFF);
static const uint64 kuint64max = ((uint64)GG_LONGLONG(0xFFFFFFFFFFFFFFFF));
static const int8 kint8min = ((int8)~0x7F);
static const int8 kint8max = ((int8)0x7F);
static const int16 kint16min = ((int16)~0x7FFF);
static const int16 kint16max = ((int16)0x7FFF);
static const int32 kint32min = ((int32)~0x7FFFFFFF);
static const int32 kint32max = ((int32)0x7FFFFFFF);
static const int64 kint64min = ((int64)GG_LONGLONG(~0x7FFFFFFFFFFFFFFF));
static const int64 kint64max = ((int64)GG_LONGLONG(0x7FFFFFFFFFFFFFFF));

// 64-bit fingerprint type; 0 is reserved as an illegal value.
typedef uint64 Fprint;
static const Fprint kIllegalFprint = 0;
static const Fprint kMaxFprint = GG_ULONGLONG(0xFFFFFFFFFFFFFFFF);

#endif  // BASE_INTEGRAL_TYPES_H_
diff --git a/src/3rdparty/resonance-audio/resonance_audio/base/logging.h b/src/3rdparty/resonance-audio/resonance_audio/base/logging.h
new file mode 100644
index 000000000..63dd00431
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/base/logging.h
/*
Copyright 2018 Google Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS-IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

#ifndef RESONANCE_AUDIO_PLATFORM_LOGGING_H_
#define RESONANCE_AUDIO_PLATFORM_LOGGING_H_

#include <cstdlib>

#include <cassert>
#include <iostream>
#include <sstream>

// Undefine any pre-existing glog/project macros so the definitions below win.
#undef DCHECK
#undef DCHECK_EQ
#undef DCHECK_NE
#undef DCHECK_LE
#undef DCHECK_LT
#undef DCHECK_GE
#undef DCHECK_GT
#undef CHECK
#undef CHECK_EQ
#undef CHECK_NE
#undef CHECK_LE
#undef CHECK_LT
#undef CHECK_GE
#undef CHECK_GT
#undef CHECK_NOTNULL
#undef LOG

// This class is used to disable logging, while still allowing for log messages
// to contain '<<' expressions. Streaming into an ostream constructed with a
// null streambuf discards all output.
class NullLogger {
 public:
  std::ostream& GetStream() {
    static std::ostream kNullStream(nullptr);
    return kNullStream;
  }
};

// If statement prevents unused variable warnings.
// NOTE: because of the `false &&` short-circuit, |expr| is never evaluated at
// runtime — these DCHECKs are no-ops that only type-check their arguments.
#define DCHECK(expr) \
  if (false && (expr)) \
    ; \
  else \
    NullLogger().GetStream()
#define DCHECK_OP(val1, val2, op) DCHECK((val1)op(val2))

#define DCHECK_EQ(val1, val2) DCHECK_OP((val1), (val2), ==)
#define DCHECK_NE(val1, val2) DCHECK_OP((val1), (val2), !=)
#define DCHECK_LE(val1, val2) DCHECK_OP((val1), (val2), <=)
#define DCHECK_LT(val1, val2) DCHECK_OP((val1), (val2), <)
#define DCHECK_GE(val1, val2) DCHECK_OP((val1), (val2), >=)
#define DCHECK_GT(val1, val2) DCHECK_OP((val1), (val2), >)

// This class is used to log to std::cerr. The accumulated message is printed
// and the process is abort()ed in the destructor, i.e. at the end of the full
// CHECK statement that streamed into it.
class FatalLogger {
 public:
  FatalLogger(const char* file, int line) {
    error_string_ << file << ":" << line << ": ";
  }
  ~FatalLogger() {
    const std::string error_string = error_string_.str();
    std::cerr << error_string << std::endl;
    abort();
  }
  std::ostream& GetStream() { return error_string_; }

 private:
  std::ostringstream error_string_;
};

// Unlike DCHECK, CHECK does evaluate |condition|; on failure the streamed
// message goes to a temporary FatalLogger whose destructor aborts.
#define CHECK(condition)                                       \
  !(condition) ? FatalLogger(__FILE__, __LINE__).GetStream() \
               : NullLogger().GetStream()

#define CHECK_OP(val1, val2, op) CHECK((val1)op(val2))

#define CHECK_EQ(val1, val2) CHECK_OP((val1), (val2), ==)
#define CHECK_NE(val1, val2) CHECK_OP((val1), (val2), !=)
#define CHECK_LE(val1, val2) CHECK_OP((val1), (val2), <=)
#define CHECK_LT(val1, val2) CHECK_OP((val1), (val2), <)
#define CHECK_GE(val1, val2) CHECK_OP((val1), (val2), >=)
#define CHECK_GT(val1, val2) CHECK_OP((val1), (val2), >)

// Helper for CHECK_NOTNULL(), using C++11 perfect forwarding.
// Note this uses assert(), so the null check disappears in NDEBUG builds.
template <typename T>
T CheckNotNull(T&& t) {
  assert(t != nullptr);
  return std::forward<T>(t);
}
#define CHECK_NOTNULL(val) CheckNotNull(val)

// LOG(severity) discards all messages in this build.
#define LOG(severity) NullLogger().GetStream()

#endif  // RESONANCE_AUDIO_PLATFORM_LOGGING_H_
diff --git a/src/3rdparty/resonance-audio/resonance_audio/base/misc_math.cc b/src/3rdparty/resonance-audio/resonance_audio/base/misc_math.cc
new file mode 100644
index 000000000..4de96ba41
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/base/misc_math.cc
/*
Copyright 2018 Google Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS-IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+*/ + +#include "base/misc_math.h" + +namespace vraudio { + +WorldPosition::WorldPosition() { setZero(); } + +WorldRotation::WorldRotation() { setIdentity(); } + +bool LinearLeastSquareFitting(const std::vector<float>& x_array, + const std::vector<float>& y_array, float* slope, + float* intercept, float* r_squared) { + // The array sizes must agree. + if (x_array.size() != y_array.size()) { + return false; + } + + // At least two points are needed to fit a line. + if (x_array.size() < 2) { + return false; + } + + float x_sum = 0.0f; + float y_sum = 0.0f; + float x_square_sum = 0.0f; + float xy_sum = 0.0f; + + for (size_t i = 0; i < x_array.size(); ++i) { + const float x = x_array[i]; + const float y = y_array[i]; + x_sum += x; + y_sum += y; + x_square_sum += x * x; + xy_sum += x * y; + } + + const float n_inverse = 1.0f / static_cast<float>(x_array.size()); + const float x_mean = x_sum * n_inverse; + const float y_mean = y_sum * n_inverse; + const float x_square_mean = x_square_sum * n_inverse; + const float xy_mean = xy_sum * n_inverse; + const float x_mean_square = x_mean * x_mean; + + // Prevent division by zero, which means a vertical line and the slope is + // infinite. + if (x_square_mean == x_mean_square) { + return false; + } + + *slope = (xy_mean - x_mean * y_mean) / (x_square_mean - x_mean_square); + *intercept = y_mean - *slope * x_mean; + + // Compute the coefficient of determination. + float total_sum_of_squares = 0.0f; + float residual_sum_of_squares = 0.0f; + for (size_t i = 0; i < x_array.size(); ++i) { + const float y_i = y_array[i]; + total_sum_of_squares += (y_i - y_mean) * (y_i - y_mean); + const float y_fit = *slope * x_array[i] + *intercept; + residual_sum_of_squares += (y_fit - y_i) * (y_fit - y_i); + } + + if (total_sum_of_squares == 0.0f) { + if (residual_sum_of_squares == 0.0f) { + // A special case where all y's are equal, where the |r_squared| should + // be 1.0, and the line is a perfectly horizontal line. 
+ *r_squared = 1.0f; + return true; + } else { + // Division by zero. + return false; + } + } + + *r_squared = 1.0f - residual_sum_of_squares / total_sum_of_squares; + return true; +} + +} // namespace vraudio diff --git a/src/3rdparty/resonance-audio/resonance_audio/base/misc_math.h b/src/3rdparty/resonance-audio/resonance_audio/base/misc_math.h new file mode 100644 index 000000000..5993eb55d --- /dev/null +++ b/src/3rdparty/resonance-audio/resonance_audio/base/misc_math.h @@ -0,0 +1,385 @@ +/* +Copyright 2018 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS-IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +#ifndef RESONANCE_AUDIO_BASE_MISC_MATH_H_ +#define RESONANCE_AUDIO_BASE_MISC_MATH_H_ + +#ifndef _USE_MATH_DEFINES +#define _USE_MATH_DEFINES // Enable MSVC math constants (e.g., M_PI). +#endif // _USE_MATH_DEFINES + +#include <algorithm> +#include <cmath> +#include <cstdint> +#include <limits> +#include <utility> +#include <vector> + +#include "base/integral_types.h" +#include "Eigen/Dense" +#include "base/constants_and_types.h" +#include "base/logging.h" + +namespace vraudio { + +class WorldPosition : public Eigen::Matrix<float, 3, 1, Eigen::DontAlign> { + public: + // Inherits all constructors with 1-or-more arguments. Necessary because + // MSVC12 doesn't support inheriting constructors. + template <typename Arg1, typename... Args> + WorldPosition(const Arg1& arg1, Args&&... args) + : Matrix(arg1, std::forward<Args>(args)...) {} + + // Constructs a zero vector. 
  WorldPosition();

  // Returns True if other |WorldPosition| differs by at least |kEpsilonFloat|
  // in any single component.
  bool operator!=(const WorldPosition& other) const {
    return std::abs(this->x() - other.x()) > kEpsilonFloat ||
           std::abs(this->y() - other.y()) > kEpsilonFloat ||
           std::abs(this->z() - other.z()) > kEpsilonFloat;
  }
};

// 3D rotation (unaligned Eigen float quaternion).
class WorldRotation : public Eigen::Quaternion<float, Eigen::DontAlign> {
 public:
  // Inherits all constructors with 1-or-more arguments. Necessary because
  // MSVC12 doesn't support inheriting constructors.
  template <typename Arg1, typename... Args>
  WorldRotation(const Arg1& arg1, Args&&... args)
      : Quaternion(arg1, std::forward<Args>(args)...) {}

  // Constructs an identity rotation.
  WorldRotation();

  // Returns the shortest arc between two |WorldRotation|s in radians.
  float AngularDifferenceRad(const WorldRotation& other) const {
    const Quaternion difference = this->inverse() * other;
    return static_cast<float>(Eigen::AngleAxisf(difference).angle());
  }
};

typedef Eigen::AngleAxis<float> AngleAxisf;

typedef WorldPosition AudioPosition;

typedef WorldRotation AudioRotation;

// Converts |world_position| into an equivalent audio space position.
// The world space follows the typical CG coordinate system convention:
// Positive x points right, positive y points up, negative z points forward.
// The audio space follows the ambiX coordinate system convention that is
// commonly accepted in literature [http://goo.gl/XdYNm9]:
// Positive x points forward, negative y points right, positive z points up.
// Positions in both world space and audio space are in meters.
//
// @param world_position 3D position in world space.
// @param audio_position Output 3D position in audio space.
inline void ConvertAudioFromWorldPosition(const WorldPosition& world_position,
                                          AudioPosition* audio_position) {
  DCHECK(audio_position);
  (*audio_position)(0) = -world_position[2];
  (*audio_position)(1) = -world_position[0];
  (*audio_position)(2) = world_position[1];
}

// Converts |audio_position| into an equivalent world space position.
// The world space follows the typical CG coordinate system convention:
// Positive x points right, positive y points up, negative z points forward.
// The audio space follows the ambiX coordinate system convention that is
// commonly accepted in literature [http://goo.gl/XdYNm9]:
// Positive x points forward, negative y points right, positive z points up.
// Positions in both world space and audio space are in meters.
//
// @param audio_position 3D position in audio space.
// @param world_position Output 3D position in world space.
inline void ConvertWorldFromAudioPosition(const AudioPosition& audio_position,
                                          AudioPosition* world_position) {
  DCHECK(world_position);
  (*world_position)(0) = -audio_position[1];
  (*world_position)(1) = audio_position[2];
  (*world_position)(2) = -audio_position[0];
}

// Converts |world_rotation| into an equivalent audio space rotation.
// The world space follows the typical CG coordinate system convention:
// Positive x points right, positive y points up, negative z points forward.
// The audio space follows the ambiX coordinate system convention that is
// commonly accepted in literature [http://goo.gl/XdYNm9]:
// Positive x points forward, negative y points right, positive z points up.
// The conversion negates the quaternion's x and z components to remap the
// rotation axes between the two coordinate frames.
//
// @param world_rotation 3D rotation in world space.
// @param audio_rotation Output 3D rotation in audio space.
inline void ConvertAudioFromWorldRotation(const WorldRotation& world_rotation,
                                          AudioRotation* audio_rotation) {
  DCHECK(audio_rotation);
  audio_rotation->w() = world_rotation.w();
  audio_rotation->x() = -world_rotation.x();
  audio_rotation->y() = world_rotation.y();
  audio_rotation->z() = -world_rotation.z();
}

// Returns the relative direction vector |from_position| and |to_position| by
// rotating the relative position vector with respect to |from_rotation|.
//
// @param from_position Origin position of the direction.
// @param from_rotation Origin orientation of the direction.
// @param to_position Target position of the direction.
// @param relative_direction Relative direction vector (not normalized).
inline void GetRelativeDirection(const WorldPosition& from_position,
                                 const WorldRotation& from_rotation,
                                 const WorldPosition& to_position,
                                 WorldPosition* relative_direction) {
  DCHECK(relative_direction);
  *relative_direction =
      from_rotation.conjugate() * (to_position - from_position);
}

// Returns the closest relative position in an axis-aligned bounding box to the
// given |relative_position|, clamping each component independently.
//
// @param relative_position Input position relative to the center of the
//     bounding box.
// @param aabb_dimensions Bounding box dimensions.
// @param closest_position Output position clamped to the bounding box.
inline void GetClosestPositionInAabb(const WorldPosition& relative_position,
                                     const WorldPosition& aabb_dimensions,
                                     WorldPosition* closest_position) {
  DCHECK(closest_position);
  const WorldPosition aabb_offset = 0.5f * aabb_dimensions;
  (*closest_position)[0] =
      std::min(std::max(relative_position[0], -aabb_offset[0]), aabb_offset[0]);
  (*closest_position)[1] =
      std::min(std::max(relative_position[1], -aabb_offset[1]), aabb_offset[1]);
  (*closest_position)[2] =
      std::min(std::max(relative_position[2], -aabb_offset[2]), aabb_offset[2]);
}

// Returns true if given world |position| is in given axis-aligned bounding box.
+// +// @param position Position to be tested. +// @param aabb_center Bounding box center. +// @param aabb_dimensions Bounding box dimensions. +// @return True if |position| is within bounding box, false otherwise. +inline bool IsPositionInAabb(const WorldPosition& position, + const WorldPosition& aabb_center, + const WorldPosition& aabb_dimensions) { + return std::abs(position[0] - aabb_center[0]) <= 0.5f * aabb_dimensions[0] && + std::abs(position[1] - aabb_center[1]) <= 0.5f * aabb_dimensions[1] && + std::abs(position[2] - aabb_center[2]) <= 0.5f * aabb_dimensions[2]; +} + +// Returns true if an integer overflow occurred during the calculation of +// x = a * b. +// +// @param a First multiplicand. +// @param b Second multiplicand. +// @param x Product. +// @return True if integer overflow occurred, false otherwise. +template <typename T> +inline bool DoesIntegerMultiplicationOverflow(T a, T b, T x) { + // Detects an integer overflow occurs by inverting the multiplication and + // testing for x / a != b. + return a == 0 ? false : (x / a != b); +} + +// Returns true if an integer overflow occurred during the calculation of +// a + b. +// +// @param a First summand. +// @param b Second summand. +// @return True if integer overflow occurred, false otherwise. +template <typename T> +inline bool DoesIntegerAdditionOverflow(T a, T b) { + T x = a + b; + return x < b; +} + +// Safely converts an int to a size_t. +// +// @param i Integer input. +// @param x Size_t output. +// @return True if integer overflow occurred, false otherwise. +inline bool DoesIntSafelyConvertToSizeT(int i, size_t* x) { + if (i < 0) { + return false; + } + *x = static_cast<size_t>(i); + return true; +} + +// Safely converts a size_t to an int. +// +// @param i Size_t input. +// @param x Integer output. +// @return True if integer overflow occurred, false otherwise. 
+inline bool DoesSizeTSafelyConvertToInt(size_t i, int* x) { + if (i > static_cast<size_t>(std::numeric_limits<int>::max())) { + return false; + } + *x = static_cast<int>(i); + return true; +} + +// Finds the greatest common divisor between two integer values using the +// Euclidean algorithm. Always returns a positive integer. +// +// @param a First of the two integer values. +// @param b second of the two integer values. +// @return The greatest common divisor of the two integer values. +inline int FindGcd(int a, int b) { + a = std::abs(a); + b = std::abs(b); + int temp_value = 0; + while (b != 0) { + temp_value = b; + b = a % b; + a = temp_value; + } + return a; +} + +// Finds the next power of two from an integer. This method works with values +// representable by unsigned 32 bit integers. +// +// @param input Integer value. +// @return The next power of two from |input|. +inline size_t NextPowTwo(size_t input) { + // Ensure the value fits in a uint32_t. + DCHECK_LT(static_cast<uint64_t>(input), + static_cast<uint64_t>(std::numeric_limits<uint32_t>::max())); + uint32_t number = static_cast<uint32_t>(--input); + number |= number >> 1; // Take care of 2 bit numbers. + number |= number >> 2; // Take care of 4 bit numbers. + number |= number >> 4; // Take care of 8 bit numbers. + number |= number >> 8; // Take care of 16 bit numbers. + number |= number >> 16; // Take care of 32 bit numbers. + number++; + return static_cast<size_t>(number); +} + +// Returns the factorial (!) of x. If x < 0, it returns 0. +inline float Factorial(int x) { + if (x < 0) return 0.0f; + float result = 1.0f; + for (; x > 0; --x) result *= static_cast<float>(x); + return result; +} + +// Returns the double factorial (!!) of x. +// For odd x: 1 * 3 * 5 * ... * (x - 2) * x +// For even x: 2 * 4 * 6 * ... * (x - 2) * x +// If x < 0, it returns 0. 
inline float DoubleFactorial(int x) {
  if (x < 0) {
    return 0.0f;
  }
  float product = 1.0f;
  // Multiply every second integer from x down to 1 (odd x) or 2 (even x),
  // in the same descending order as the reference implementation so float
  // rounding is identical.
  int factor = x;
  while (factor > 0) {
    product *= static_cast<float>(factor);
    factor -= 2;
  }
  return product;
}

// This is a *safe* alternative to std::equal function as a workaround in order
// to avoid MSVC compiler warning C4996 for unchecked iterators (see
// https://msdn.microsoft.com/en-us/library/aa985965.aspx).
// Also note that, an STL equivalent of this function was introduced in C++14 to
// be replaced with this implementation (see version (5) in
// http://en.cppreference.com/w/cpp/algorithm/equal).
template <typename Iterator>
inline bool EqualSafe(const Iterator& lhs_begin, const Iterator& lhs_end,
                      const Iterator& rhs_begin, const Iterator& rhs_end) {
  // Walk both ranges in lockstep; equal iff they diverge nowhere and end
  // together.
  auto lhs = lhs_begin;
  auto rhs = rhs_begin;
  for (; lhs != lhs_end && rhs != rhs_end; ++lhs, ++rhs) {
    if (*lhs != *rhs) {
      return false;
    }
  }
  return lhs == lhs_end && rhs == rhs_end;
}

// Fast reciprocal of square-root. See: https://goo.gl/fqvstz for details.
//
// @param input The number to be inverse rooted.
// @return An approximation of the reciprocal square root of |input|.
inline float FastReciprocalSqrt(float input) {
  const float kThreeHalfs = 1.5f;
  const uint32_t kMagicNumber = 0x5f3759df;

  // Approximate a logarithm by aliasing the float's bits to an integer
  // (std::memcpy avoids undefined type-punning).
  uint32_t bits;
  std::memcpy(&bits, &input, sizeof(float));
  bits = kMagicNumber - (bits >> 1);
  float estimate;
  std::memcpy(&estimate, &bits, sizeof(float));
  // One iteration of Newton's method refines the estimate.
  const float half_input = 0.5f * input;
  return estimate * (kThreeHalfs - (half_input * estimate * estimate));
}

// Finds the best-fitting line to a given set of 2D points by minimizing the
// sum of the squares of the vertical (along y-axis) offsets.
// The slope and
// intercept of the fitted line are recorded, as well as the coefficient of
// determination, which gives the quality of the fitting.
// See http://mathworld.wolfram.com/LeastSquaresFitting.html for how to compute
// these values.
//
// @param x_array Array of the x coordinates of the points.
// @param y_array Array of the y coordinates of the points.
// @param slope Output slope of the fitted line.
// @param intercept Output intercept of the fitted line.
// @param r_squared Output coefficient of determination (1.0 means a perfect
//     fit).
// @return False if the fitting fails.
bool LinearLeastSquareFitting(const std::vector<float>& x_array,
                              const std::vector<float>& y_array, float* slope,
                              float* intercept, float* r_squared);

// Computes |base|^|exp|, where |exp| is a *non-negative* integer, with the
// squared exponentiation (a.k.a double-and-add) method.
// When T is a floating point type, this has the same semantics as pow(), but
// is much faster.
// T can also be any integral type, in which case computations will be
// performed in the value domain of this integral type, and overflow semantics
// will be those of T.
// You can also use any type for which operator*= is defined.
// See https://en.wikipedia.org/wiki/Exponentiation_by_squaring for background.
//
// This method is reproduced here so vraudio classes don't need to depend on
// //util/math/mathutil.h
//
// @tparam T Type of the base; any type for which *= is defined.
// @param base Input to the exponent function.
// @param exp Integer exponent, must be greater than or equal to zero.
// @return |base|^|exp|.
template <typename T>
static inline T IntegerPow(T base, int exp) {
  DCHECK_GE(exp, 0);
  T result = static_cast<T>(1);
  while (true) {
    // If the lowest bit of |exp| is set, the current power of |base|
    // contributes to the product.
    if (exp & 1) {
      result *= base;
    }
    exp >>= 1;
    // Break before squaring again: avoids one redundant multiplication (and,
    // for integral T, a possible overflow) once all bits are consumed.
    if (!exp) break;
    base *= base;
  }
  return result;
}

}  // namespace vraudio

#endif  // RESONANCE_AUDIO_BASE_MISC_MATH_H_
diff --git a/src/3rdparty/resonance-audio/resonance_audio/base/misc_math_test.cc b/src/3rdparty/resonance-audio/resonance_audio/base/misc_math_test.cc
new file mode 100644
index 000000000..e75c100c6
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/base/misc_math_test.cc
@@ -0,0 +1,373 @@
/*
Copyright 2018 Google Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS-IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

#include "base/misc_math.h"

#include "third_party/googletest/googletest/include/gtest/gtest.h"

#include "base/constants_and_types.h"

namespace vraudio {

namespace {

// Two world positions built from identical coordinates must not compare
// unequal.
TEST(MiscMath, WorldPositionEqualityTest) {
  const WorldPosition kOriginalWorldPosition(0.33f, 0.44f, 0.55f);
  const WorldPosition kSameWorldPosition = kOriginalWorldPosition;
  EXPECT_FALSE(kOriginalWorldPosition != kSameWorldPosition);
}

// Positions differing in one or more components must compare unequal.
TEST(MiscMath, WorldPositionInequalityTest) {
  const WorldPosition kOriginalWorldPosition(0.11f, 0.22f, 0.33f);
  const std::vector<WorldPosition> kDifferentWorldPositions{
      {-0.22f, 0.22f, 0.33f}, {0.11f, -0.33f, 0.33f}, {0.11f, 0.22f, -0.22f},
      {0.11f, 0.33f, -0.44f}, {0.22f, 0.22f, -0.44f}, {0.22f, 0.33f, -0.55f}};

  for (auto& position : kDifferentWorldPositions) {
    EXPECT_TRUE(kOriginalWorldPosition != position);
  }
}

// World-to-audio space: (x, y, z) maps to (-z, -x, y).
TEST(MiscMath, ConvertAudioFromWorldPositionTest) {
  static const WorldPosition kWorldPosition(0.5f, -1.2f, 10.f);
  static const AudioPosition kExpectedAudioPosition(
      -kWorldPosition[2], -kWorldPosition[0], kWorldPosition[1]);
  AudioPosition test_position;
  ConvertAudioFromWorldPosition(kWorldPosition, &test_position);

  EXPECT_TRUE(kExpectedAudioPosition.isApprox(test_position, kEpsilonFloat));
}

// Audio-to-world space: (x, y, z) maps to (-y, z, -x), the inverse of the
// conversion above.
TEST(MiscMath, ConvertWorldFromAudioPositionTest) {
  static const AudioPosition kAudioPosition(1.0f, 2.0f, -0.2f);
  static const WorldPosition kExpectedWorldPosition(
      -kAudioPosition[1], kAudioPosition[2], -kAudioPosition[0]);

  WorldPosition test_position;
  ConvertWorldFromAudioPosition(kAudioPosition, &test_position);

  EXPECT_TRUE(kExpectedWorldPosition.isApprox(test_position, kEpsilonFloat));
}

// Quaternion conversion into audio space flips the signs of x and z.
TEST(MiscMath, ConvertAudioFromWorldRotationTest) {
  static const WorldRotation kWorldRotation(1.0f, 0.5f, -1.2f, 10.f);
  static const AudioRotation kExpectedAudioRotation(
      kWorldRotation.w(), -kWorldRotation.x(), kWorldRotation.y(),
      -kWorldRotation.z());
  AudioRotation test_rotation;

  ConvertAudioFromWorldRotation(kWorldRotation, &test_rotation);

  EXPECT_TRUE(kExpectedAudioRotation.isApprox(test_rotation, kEpsilonFloat));
}

// The relative direction is the target position rotated into the listener's
// frame (here, a 90-degree rotation about the z axis).
TEST(MiscMath, GetRelativeDirectionTest) {
  static const WorldPosition kFromPosition(0.0f, 0.0f, 0.0f);
  static const WorldPosition kFromRotationAxis(0.0f, 0.0f, 1.0f);
  static const float kFromRotationAngle = static_cast<float>(M_PI / 2.0);
  WorldRotation kFromRotation =
      WorldRotation(AngleAxisf(kFromRotationAngle, kFromRotationAxis));

  static const WorldPosition kToPosition(1.0f, 2.0f, 3.0f);

  static const WorldPosition kExpectedRelativeDirection(2.0f, -1.0f, 3.0f);
  WorldPosition test_relative_direction;
  GetRelativeDirection(kFromPosition, kFromRotation, kToPosition,
                       &test_relative_direction);

  EXPECT_TRUE(kExpectedRelativeDirection.isApprox(test_relative_direction,
                                                  kEpsilonFloat));
}

// A position already inside the box must be returned unchanged.
TEST(MiscMath, GetClosestPositionInAabbInsideTest) {
  static const WorldPosition kRelativeSourcePosition(0.0f, -0.2f, 0.0f);
  static const WorldPosition kRoomDimensions(1.0f, 1.0f, 1.0f);
  static const WorldPosition kExpectedAabbPosition = kRelativeSourcePosition;
  WorldPosition test_position;
  GetClosestPositionInAabb(kRelativeSourcePosition, kRoomDimensions,
                           &test_position);

  EXPECT_TRUE(kExpectedAabbPosition.isApprox(test_position, kEpsilonFloat));
}

// A position outside the box must be clamped to the nearest face (here, y is
// clamped from 0.7 to the half-dimension 0.5).
TEST(MiscMath, GetClosestPositionInAabbOutsideTest) {
  static const WorldPosition kRelativeSourcePosition(0.2f, 0.7f, -0.5f);
  static const WorldPosition kRoomDimensions(1.0f, 1.0f, 1.0f);
  static const WorldPosition kExpectedAabbPosition(0.2f, 0.5f, -0.5f);
  WorldPosition test_position;
  GetClosestPositionInAabb(kRelativeSourcePosition, kRoomDimensions,
                           &test_position);

  EXPECT_TRUE(kExpectedAabbPosition.isApprox(test_position, kEpsilonFloat));
}

TEST(MiscMath, IsPositionInAabbInsideTest) {
  static const WorldPosition kSourcePosition(0.5f, 0.3f, 0.2f);
  static const WorldPosition kRoomPosition(0.5f, 0.5f, 0.5f);
  static const WorldPosition kRoomDimensions(1.0f, 1.0f, 1.0f);

  EXPECT_TRUE(
      IsPositionInAabb(kSourcePosition, kRoomPosition, kRoomDimensions));
}

TEST(MiscMath, IsPositionInAabbOutsideTest) {
  static const WorldPosition kSourcePosition(0.7f, 1.2f, 0.0f);
  static const WorldPosition kRoomPosition(0.5f, 0.5f, 0.5f);
  static const WorldPosition kRoomDimensions(1.0f, 1.0f, 1.0f);

  EXPECT_FALSE(
      IsPositionInAabb(kSourcePosition, kRoomPosition, kRoomDimensions));
}

TEST(MiscMath, IntegerMultiplicationOverflowDetection) {
  static const size_t kMaxValue = std::numeric_limits<size_t>::max();
  static const size_t kHalfMaxValue = kMaxValue / 2;

  // 2 * 3 == 6 should not lead to an integer overflow.
  EXPECT_FALSE(DoesIntegerMultiplicationOverflow<size_t>(2, 3, 6));

  EXPECT_FALSE(DoesIntegerMultiplicationOverflow<size_t>(kHalfMaxValue, 2,
                                                         kHalfMaxValue * 2));
  EXPECT_TRUE(
      DoesIntegerMultiplicationOverflow<size_t>(kMaxValue, 2, kMaxValue << 1));
  EXPECT_FALSE(
      DoesIntegerMultiplicationOverflow<size_t>(0, kMaxValue, 0 * kMaxValue));
  EXPECT_FALSE(
      DoesIntegerMultiplicationOverflow<size_t>(kMaxValue, 0, kMaxValue * 0));
}

TEST(MiscMath, DoesIntegerAdditionOverflow) {
  static const size_t kMaxValue = std::numeric_limits<size_t>::max();
  static const size_t kHalfMaxValue = kMaxValue / 2;

  EXPECT_FALSE(
      DoesIntegerAdditionOverflow<size_t>(kHalfMaxValue, kHalfMaxValue));
  EXPECT_TRUE(DoesIntegerAdditionOverflow<size_t>(kMaxValue, kHalfMaxValue));
  EXPECT_TRUE(DoesIntegerAdditionOverflow<size_t>(1, kMaxValue));
  EXPECT_FALSE(DoesIntegerAdditionOverflow<size_t>(kMaxValue, 0));
  EXPECT_FALSE(DoesIntegerAdditionOverflow<size_t>(0, kMaxValue));
}

// Non-negative ints convert; negative values must be rejected.
TEST(MiscMath, DoesIntSafelyConvertToSizeT) {
  static const int kMaxIntValue = std::numeric_limits<int>::max();
  size_t test_size_t;
  EXPECT_TRUE(DoesIntSafelyConvertToSizeT(kMaxIntValue, &test_size_t));
  EXPECT_EQ(static_cast<size_t>(kMaxIntValue), test_size_t);

  EXPECT_TRUE(DoesIntSafelyConvertToSizeT(0, &test_size_t));
  EXPECT_EQ(0U, test_size_t);
  EXPECT_FALSE(DoesIntSafelyConvertToSizeT(-1, &test_size_t));
}

// Values above INT_MAX must be rejected.
TEST(MiscMath, DoesSizeTSafelyConvertToInt) {
  // NOTE(review): despite the name, this constant holds the maximum size_t
  // value (not the maximum int) — a rename to kMaxSizeTValue would be clearer.
  static const size_t kMaxIntValue = std::numeric_limits<size_t>::max();
  int test_int;

  EXPECT_FALSE(DoesSizeTSafelyConvertToInt(kMaxIntValue, &test_int));
  EXPECT_TRUE(
      DoesSizeTSafelyConvertToInt(std::numeric_limits<int>::max(), &test_int));
  EXPECT_EQ(std::numeric_limits<int>::max(), test_int);
  EXPECT_TRUE(DoesSizeTSafelyConvertToInt(0, &test_int));
  EXPECT_EQ(0, test_int);
}

// Includes negative operands; the GCD is expected to be positive.
TEST(MiscMath, GreatestCommonDivisorTest) {
  const std::vector<int> a_values = {2, 10, 3, 5, 48000, 7, -2, 2, -3};
  const std::vector<int> b_values = {8, 4, 1, 10, 24000, 13, 6, -6, -9};
  const std::vector<int> expected = {2, 2, 1, 5, 24000, 1, 2, 2, 3};

  for (size_t i = 0; i < expected.size(); ++i) {
    EXPECT_EQ(expected[i], FindGcd(a_values[i], b_values[i]));
  }
}

// NextPowTwo returns the smallest power of two >= input.
TEST(MiscMath, NextPowTwoTest) {
  const std::vector<size_t> inputs = {2, 10, 3, 5, 48000, 7, 23, 32};
  const std::vector<size_t> expected = {2, 16, 4, 8, 65536, 8, 32, 32};

  for (size_t i = 0; i < inputs.size(); ++i) {
    EXPECT_EQ(expected[i], NextPowTwo(inputs[i]));
  }
}

TEST(MiscMath, EqualSafeEqualArraysTest) {
  const float kOriginalArray[3] = {0.11f, 0.22f, 0.33f};
  const float kSameArray[3] = {0.11f, 0.22f, 0.33f};

  EXPECT_TRUE(EqualSafe(std::begin(kOriginalArray), std::end(kOriginalArray),
                        std::begin(kSameArray), std::end(kSameArray)));
}

TEST(MiscMath, EqualSafeUnequalArraysTest) {
  const std::vector<float> kOriginalArray{0.11f, 0.22f, 0.33f};
  const std::vector<std::vector<float>> kDifferentArrays{
      {-0.22f, 0.22f, 0.33f}, {0.11f, -0.33f, 0.33f}, {0.11f, 0.22f, -0.22f},
      {0.11f, 0.33f, -0.44f}, {0.22f, 0.22f, -0.44f}, {0.22f, 0.33f, -0.55f}};

  for (auto& array : kDifferentArrays) {
    EXPECT_FALSE(EqualSafe(std::begin(kOriginalArray), std::end(kOriginalArray),
                           std::begin(array), std::end(array)));
  }
}

// The approximation must stay within 0.2% relative error across several
// orders of magnitude.
TEST(MiscMath, FastReciprocalSqrtTest) {
  const std::vector<float> kNumbers{130.0f, 13.0f, 1.3f,
                                    0.13f,  0.013f, 0.0013f};
  const float kSqrtEpsilon = 2e-3f;
  for (auto& number : kNumbers) {
    const float actual = std::sqrt(number);
    const float approximate = 1.0f / FastReciprocalSqrt(number);
    EXPECT_LT(std::abs(actual - approximate) / actual, kSqrtEpsilon);
  }
}

TEST(MiscMath, LinearFittingArrayDifferentSizesFails) {
  const std::vector<float> x_array{1.0f, 2.0f};
  const std::vector<float> y_array{3.0f, 4.0f, 5.0f};
  float slope = 0.0f;
  float intercept = 0.0f;
  float r_squared = 0.0f;
  EXPECT_FALSE(LinearLeastSquareFitting(x_array, y_array, &slope, &intercept,
                                        &r_squared));
}

TEST(MiscMath, LinearFittingFewerThanTwoPointsFails) {
  const std::vector<float> x_array{1.0f};
  const std::vector<float> y_array{2.0f};
  float slope = 0.0f;
  float intercept = 0.0f;
  float r_squared = 0.0f;
  EXPECT_FALSE(LinearLeastSquareFitting(x_array, y_array, &slope, &intercept,
                                        &r_squared));
}

TEST(MiscMath, LinearFittingVerticalLineFails) {
  // All points line up on the y-axis.
  const std::vector<float> x_array{0.0f, 0.0f, 0.0f};
  const std::vector<float> y_array{1.0f, 2.0f, 3.0f};
  float slope = 0.0f;
  float intercept = 0.0f;
  float r_squared = 0.0f;
  EXPECT_FALSE(LinearLeastSquareFitting(x_array, y_array, &slope, &intercept,
                                        &r_squared));
}

TEST(MiscMath, LinearFittingHorizontalLine) {
  // All points line up on the x-axis.
  const std::vector<float> x_array{1.0f, 2.0f, 3.0f};
  const std::vector<float> y_array{0.0f, 0.0f, 0.0f};
  float slope = 0.0f;
  float intercept = 0.0f;
  float r_squared = 0.0f;
  EXPECT_TRUE(LinearLeastSquareFitting(x_array, y_array, &slope, &intercept,
                                       &r_squared));
  EXPECT_FLOAT_EQ(slope, 0.0f);
  EXPECT_FLOAT_EQ(intercept, 0.0f);
  EXPECT_FLOAT_EQ(r_squared, 1.0f);
}

TEST(MiscMath, LinearFittingSlopedLine) {
  // All points line up on the line y = 2.0 x + 1.0.
  const std::vector<float> x_array{1.0f, 2.0f, 3.0f, 4.0f};
  const std::vector<float> y_array{3.0f, 5.0f, 7.0f, 9.0f};
  float slope = 0.0f;
  float intercept = 0.0f;
  float r_squared = 0.0f;
  EXPECT_TRUE(LinearLeastSquareFitting(x_array, y_array, &slope, &intercept,
                                       &r_squared));
  EXPECT_FLOAT_EQ(slope, 2.0f);
  EXPECT_FLOAT_EQ(intercept, 1.0f);
  EXPECT_FLOAT_EQ(r_squared, 1.0f);
}

TEST(MiscMath, LinearFittingSlopedLineWithError) {
  // All points lie close to the line y = 2.0 x + 1.0 with some offsets.
  const std::vector<float> x_array{1.002f, 2.001f, 2.998f, 4.003f};
  const std::vector<float> y_array{3.001f, 4.998f, 7.005f, 8.996f};
  float slope = 0.0f;
  float intercept = 0.0f;
  float r_squared = 0.0f;
  EXPECT_TRUE(LinearLeastSquareFitting(x_array, y_array, &slope, &intercept,
                                       &r_squared));

  // Expect that the fitting is close to the line with some error.
  const float error_tolerance = 1e-3f;
  EXPECT_NEAR(slope, 2.0f, error_tolerance);
  EXPECT_NEAR(intercept, 1.0f, error_tolerance);
  EXPECT_NEAR(r_squared, 1.0f, error_tolerance);
}

TEST(MiscMath, LinearFittingUncorrelatedPoints) {
  // All points evenly distributed on a circle y^2 + x^2 = 1.0, which gives
  // the worst coefficient of determination (almost zero).
  const size_t num_points = 20;
  std::vector<float> x_array(num_points, 0.0f);
  std::vector<float> y_array(num_points, 0.0f);
  for (size_t i = 0; i < num_points; ++i) {
    const float theta =
        kTwoPi * static_cast<float>(i) / static_cast<float>(num_points);
    x_array[i] = std::cos(theta);
    y_array[i] = std::sin(theta);
  }

  float slope = 0.0f;
  float intercept = 0.0f;
  float r_squared = 0.0f;
  EXPECT_TRUE(LinearLeastSquareFitting(x_array, y_array, &slope, &intercept,
                                       &r_squared));
  EXPECT_FLOAT_EQ(r_squared, 0.0f);
}

TEST(MiscMath, WorldRotation) {
  // Test rotation around single quaternion axis.
  const float kAngularRandomOffsetRad = 0.5f;
  const float kAngularDifferenceRad = 0.3f;
  Eigen::AngleAxisf rotation_a(kAngularRandomOffsetRad,
                               Eigen::Vector3f::UnitY());
  Eigen::AngleAxisf rotation_b(kAngularRandomOffsetRad + kAngularDifferenceRad,
                               Eigen::Vector3f::UnitY());
  EXPECT_FLOAT_EQ(WorldRotation(rotation_a).AngularDifferenceRad(rotation_b),
                  kAngularDifferenceRad);

  // Test rotation between axis.
  Eigen::AngleAxisf rotation_c(0.0f, Eigen::Vector3f::UnitY());
  Eigen::AngleAxisf rotation_d(kPi, Eigen::Vector3f::UnitZ());
  EXPECT_FLOAT_EQ(WorldRotation(rotation_c).AngularDifferenceRad(rotation_d),
                  kPi);
}

// IntegerPow must agree with std::pow for small non-negative exponents across
// float, size_t, and (negative) int bases.
TEST(MiscMath, IntegerPow) {
  const float kFloatValue = 1.5f;
  const float kNegativeFloatValue = -3.3f;
  const size_t kSizeTValue = 11U;
  const int kIntValue = 5;
  const int kNegativeIntValue = -13;

  for (int exponent = 0; exponent < 5; ++exponent) {
    EXPECT_FLOAT_EQ(IntegerPow(kFloatValue, exponent),
                    std::pow(kFloatValue, static_cast<float>(exponent)));
    EXPECT_FLOAT_EQ(
        IntegerPow(kNegativeFloatValue, exponent),
        std::pow(kNegativeFloatValue, static_cast<float>(exponent)));
    EXPECT_EQ(IntegerPow(kSizeTValue, exponent),
              std::pow(kSizeTValue, exponent));
    EXPECT_EQ(IntegerPow(kIntValue, exponent), std::pow(kIntValue, exponent));
    EXPECT_EQ(IntegerPow(kNegativeIntValue, exponent),
              std::pow(kNegativeIntValue, exponent));
  }
}

}  // namespace

}  // namespace vraudio
diff --git a/src/3rdparty/resonance-audio/resonance_audio/base/object_transform.h b/src/3rdparty/resonance-audio/resonance_audio/base/object_transform.h
new file mode 100644
index 000000000..1d43e72a0
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/base/object_transform.h
@@ -0,0 +1,31 @@
/*
Copyright 2018 Google Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS-IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

#ifndef RESONANCE_AUDIO_BASE_OBJECT_TRANSFORM_H_
#define RESONANCE_AUDIO_BASE_OBJECT_TRANSFORM_H_

#include "base/misc_math.h"

namespace vraudio {

// Transformation (position and rotation) of an object in world space.
struct ObjectTransform {
  // Position of the object in world space.
  WorldPosition position;
  // Orientation of the object in world space.
  WorldRotation rotation;
};

}  // namespace vraudio

#endif  // RESONANCE_AUDIO_BASE_OBJECT_TRANSFORM_H_
diff --git a/src/3rdparty/resonance-audio/resonance_audio/base/simd_macros.h b/src/3rdparty/resonance-audio/resonance_audio/base/simd_macros.h
new file mode 100644
index 000000000..11ae40bf4
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/base/simd_macros.h
@@ -0,0 +1,65 @@
/*
Copyright 2018 Google Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS-IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

#ifndef RESONANCE_AUDIO_BASE_SIMD_MACROS_H_
#define RESONANCE_AUDIO_BASE_SIMD_MACROS_H_

#if !defined(DISABLE_SIMD) && (defined(__x86_64__) || defined(_M_X64) || \
                               defined(i386) || defined(_M_IX86))
// SSE1 is enabled.
#include <xmmintrin.h>
typedef __m128 SimdVector;
#define SIMD_SSE
#define SIMD_LENGTH 4
#define SIMD_MULTIPLY(a, b) _mm_mul_ps(a, b)
#define SIMD_ADD(a, b) _mm_add_ps(a, b)
#define SIMD_SUB(a, b) _mm_sub_ps(a, b)
#define SIMD_MULTIPLY_ADD(a, b, c) _mm_add_ps(_mm_mul_ps(a, b), c)
// Approximate sqrt via reciprocal of the reciprocal-sqrt estimate (faster
// than _mm_sqrt_ps, at reduced precision).
#define SIMD_SQRT(a) _mm_rcp_ps(_mm_rsqrt_ps(a))
#define SIMD_RECIPROCAL_SQRT(a) _mm_rsqrt_ps(a)
#define SIMD_LOAD_ONE_FLOAT(p) _mm_set1_ps(p)
#elif !defined(DISABLE_SIMD) && \
    (((defined(__arm__) || defined(__TARGET_ARCH_ARM) || defined(_M_ARM)) && defined(__ARM_NEON__)) || \
     defined(_M_ARM64) || defined(__aarch64__) || defined(__ARM64__))
// ARM NEON is enabled.
#include <arm_neon.h>
typedef float32x4_t SimdVector;
#define SIMD_NEON
#define SIMD_LENGTH 4
#define SIMD_MULTIPLY(a, b) vmulq_f32(a, b)
#define SIMD_ADD(a, b) vaddq_f32(a, b)
#define SIMD_SUB(a, b) vsubq_f32(a, b)
// Note vmlaq_f32 takes the addend first: vmlaq_f32(c, a, b) == a * b + c,
// matching the (a, b, c) argument order of the SSE variant above.
#define SIMD_MULTIPLY_ADD(a, b, c) vmlaq_f32(c, a, b)
#define SIMD_SQRT(a) vrecpeq_f32(vrsqrteq_f32(a))
#define SIMD_RECIPROCAL_SQRT(a) vrsqrteq_f32(a)
#define SIMD_LOAD_ONE_FLOAT(p) vld1q_dup_f32(&(p))
#else
// No SIMD optimizations enabled: scalar fallbacks with SIMD_LENGTH 1.
#include "base/misc_math.h"
typedef float SimdVector;
#define SIMD_DISABLED
#define SIMD_LENGTH 1
#define SIMD_MULTIPLY(a, b) ((a) * (b))
#define SIMD_ADD(a, b) ((a) + (b))
#define SIMD_SUB(a, b) ((a) - (b))
#define SIMD_MULTIPLY_ADD(a, b, c) ((a) * (b) + (c))
#define SIMD_SQRT(a) (1.0f / FastReciprocalSqrt(a))
#define SIMD_RECIPROCAL_SQRT(a) FastReciprocalSqrt(a)
#define SIMD_LOAD_ONE_FLOAT(p) (p)
#warning "Not using SIMD optimizations!"
#endif

#endif  // RESONANCE_AUDIO_BASE_SIMD_MACROS_H_
diff --git a/src/3rdparty/resonance-audio/resonance_audio/base/simd_utils.cc b/src/3rdparty/resonance-audio/resonance_audio/base/simd_utils.cc
new file mode 100644
index 000000000..309ca7ba8
--- /dev/null
+++ b/src/3rdparty/resonance-audio/resonance_audio/base/simd_utils.cc
@@ -0,0 +1,1299 @@
/*
Copyright 2018 Google Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS-IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Prevent Visual Studio from complaining about std::copy_n.
#if defined(_WIN32)
#define _SCL_SECURE_NO_WARNINGS
#endif

#include "base/simd_utils.h"

#include <algorithm>
#include <limits>

#include "base/constants_and_types.h"
#include "base/logging.h"
#include "base/misc_math.h"
#include "base/simd_macros.h"


namespace vraudio {

namespace {

#ifdef SIMD_NEON
// Deinterleaving operates on 8 int16s at a time.
const size_t kSixteenBitSimdLength = SIMD_LENGTH * 2;
#endif  // SIMD_NEON

// Float format of max and min values storable in an int16_t, for clamping.
const float kInt16Max = static_cast<float>(0x7FFF);
const float kInt16Min = static_cast<float>(-0x7FFF);

// Conversion factors between float and int16_t (both directions).
const float kFloatFromInt16 = 1.0f / kInt16Max;
const float kInt16FromFloat = kInt16Max;

// Expected SIMD alignment in bytes.
const size_t kSimdSizeBytes = 16;

// Number of full SIMD vectors that fit in |length| samples.
inline size_t GetNumChunks(size_t length) { return length / SIMD_LENGTH; }

// Number of trailing samples that do not fill a whole SIMD vector.
inline size_t GetLeftoverSamples(size_t length) { return length % SIMD_LENGTH; }

// Returns true if |pointer| meets the 16-byte SIMD alignment requirement.
template <typename T>
inline bool IsAlignedTemplated(const T* pointer) {
  return reinterpret_cast<uintptr_t>(pointer) % kSimdSizeBytes == 0;
}

#ifdef SIMD_DISABLED
// Calculates the approximate complex magnitude of z = real + i * imaginary.
+inline void ComplexMagnitude(float real, float imaginary, float* output) { + *output = real * real + imaginary * imaginary; + // The value of |output| is not being recalculated, simply modified. + *output = 1.0f / FastReciprocalSqrt(*output); +} +#endif // defined(SIMD_DISABLED) + +} // namespace + +bool IsAligned(const float* pointer) { + return IsAlignedTemplated<float>(pointer); +} + +bool IsAligned(const int16_t* pointer) { + return IsAlignedTemplated<int16_t>(pointer); +} + +size_t FindNextAlignedArrayIndex(size_t length, size_t type_size_bytes, + size_t memory_alignment_bytes) { + const size_t byte_length = type_size_bytes * length; + const size_t unaligned_bytes = byte_length % memory_alignment_bytes; + const size_t bytes_to_next_aligned = + (unaligned_bytes == 0) ? 0 : memory_alignment_bytes - unaligned_bytes; + return (byte_length + bytes_to_next_aligned) / type_size_bytes; +} + +void AddPointwise(size_t length, const float* input_a, const float* input_b, + float* output) { + DCHECK(input_a); + DCHECK(input_b); + DCHECK(output); + + const SimdVector* input_a_vector = + reinterpret_cast<const SimdVector*>(input_a); + const SimdVector* input_b_vector = + reinterpret_cast<const SimdVector*>(input_b); + SimdVector* output_vector = reinterpret_cast<SimdVector*>(output); +#ifdef SIMD_SSE + const size_t num_chunks = GetNumChunks(length); + const bool inputs_aligned = IsAligned(input_a) && IsAligned(input_b); + const bool output_aligned = IsAligned(output); + if (inputs_aligned && output_aligned) { + for (size_t i = 0; i < num_chunks; ++i) { + output_vector[i] = SIMD_ADD(input_a_vector[i], input_b_vector[i]); + } + } else if (inputs_aligned) { + for (size_t i = 0; i < num_chunks; ++i) { + const SimdVector output_temp = + SIMD_ADD(input_a_vector[i], input_b_vector[i]); + _mm_storeu_ps(&output[i * SIMD_LENGTH], output_temp); + } + } else if (output_aligned) { + for (size_t i = 0; i < num_chunks; ++i) { + const SimdVector input_a_temp = _mm_load_ps(&input_a[i * 
SIMD_LENGTH]); + const SimdVector input_b_temp = _mm_load_ps(&input_b[i * SIMD_LENGTH]); + output_vector[i] = SIMD_ADD(input_a_temp, input_b_temp); + } + } else { + for (size_t i = 0; i < num_chunks; ++i) { + const SimdVector input_a_temp = _mm_load_ps(&input_a[i * SIMD_LENGTH]); + const SimdVector input_b_temp = _mm_load_ps(&input_b[i * SIMD_LENGTH]); + const SimdVector output_temp = SIMD_ADD(input_a_temp, input_b_temp); + _mm_storeu_ps(&output[i * SIMD_LENGTH], output_temp); + } + } +#else + for (size_t i = 0; i < GetNumChunks(length); ++i) { + output_vector[i] = SIMD_ADD(input_a_vector[i], input_b_vector[i]); + } +#endif // SIMD_SSE + + // Add samples at the end that were missed by the SIMD chunking. + const size_t leftover_samples = GetLeftoverSamples(length); + DCHECK_GE(length, leftover_samples); + for (size_t i = length - leftover_samples; i < length; ++i) { + output[i] = input_a[i] + input_b[i]; + } +} + +void SubtractPointwise(size_t length, const float* input_a, + const float* input_b, float* output) { + DCHECK(input_a); + DCHECK(input_b); + DCHECK(output); + + const SimdVector* input_a_vector = + reinterpret_cast<const SimdVector*>(input_a); + const SimdVector* input_b_vector = + reinterpret_cast<const SimdVector*>(input_b); + SimdVector* output_vector = reinterpret_cast<SimdVector*>(output); + +#ifdef SIMD_SSE + const size_t num_chunks = GetNumChunks(length); + const bool inputs_aligned = IsAligned(input_a) && IsAligned(input_b); + const bool output_aligned = IsAligned(output); + if (inputs_aligned && output_aligned) { + for (size_t i = 0; i < num_chunks; ++i) { + output_vector[i] = SIMD_SUB(input_b_vector[i], input_a_vector[i]); + } + } else if (inputs_aligned) { + for (size_t i = 0; i < num_chunks; ++i) { + const SimdVector output_temp = + SIMD_SUB(input_b_vector[i], input_a_vector[i]); + _mm_storeu_ps(&output[i * SIMD_LENGTH], output_temp); + } + } else if (output_aligned) { + for (size_t i = 0; i < num_chunks; ++i) { + const SimdVector input_a_temp 
= _mm_load_ps(&input_a[i * SIMD_LENGTH]); + const SimdVector input_b_temp = _mm_load_ps(&input_b[i * SIMD_LENGTH]); + output_vector[i] = SIMD_SUB(input_b_temp, input_a_temp); + } + } else { + for (size_t i = 0; i < num_chunks; ++i) { + const SimdVector input_a_temp = _mm_load_ps(&input_a[i * SIMD_LENGTH]); + const SimdVector input_b_temp = _mm_load_ps(&input_b[i * SIMD_LENGTH]); + const SimdVector output_temp = SIMD_SUB(input_b_temp, input_a_temp); + _mm_storeu_ps(&output[i * SIMD_LENGTH], output_temp); + } + } +#else + for (size_t i = 0; i < GetNumChunks(length); ++i) { + output_vector[i] = SIMD_SUB(input_b_vector[i], input_a_vector[i]); + } +#endif // SIMD_SSE + + // Subtract samples at the end that were missed by the SIMD chunking. + const size_t leftover_samples = GetLeftoverSamples(length); + DCHECK_GE(length, leftover_samples); + for (size_t i = length - leftover_samples; i < length; ++i) { + output[i] = input_b[i] - input_a[i]; + } +} + +void MultiplyPointwise(size_t length, const float* input_a, + const float* input_b, float* output) { + DCHECK(input_a); + DCHECK(input_b); + DCHECK(output); + + const SimdVector* input_a_vector = + reinterpret_cast<const SimdVector*>(input_a); + const SimdVector* input_b_vector = + reinterpret_cast<const SimdVector*>(input_b); + SimdVector* output_vector = reinterpret_cast<SimdVector*>(output); + +#ifdef SIMD_SSE + const size_t num_chunks = GetNumChunks(length); + const bool inputs_aligned = IsAligned(input_a) && IsAligned(input_b); + const bool output_aligned = IsAligned(output); + if (inputs_aligned && output_aligned) { + for (size_t i = 0; i < num_chunks; ++i) { + output_vector[i] = SIMD_MULTIPLY(input_a_vector[i], input_b_vector[i]); + } + } else if (inputs_aligned) { + for (size_t i = 0; i < num_chunks; ++i) { + const SimdVector output_temp = + SIMD_MULTIPLY(input_a_vector[i], input_b_vector[i]); + _mm_storeu_ps(&output[i * SIMD_LENGTH], output_temp); + } + } else if (output_aligned) { + for (size_t i = 0; i < 
num_chunks; ++i) {
      const SimdVector input_a_temp = _mm_loadu_ps(&input_a[i * SIMD_LENGTH]);
      const SimdVector input_b_temp = _mm_loadu_ps(&input_b[i * SIMD_LENGTH]);
      output_vector[i] = SIMD_MULTIPLY(input_a_temp, input_b_temp);
    }
  } else {
    for (size_t i = 0; i < num_chunks; ++i) {
      const SimdVector input_a_temp = _mm_loadu_ps(&input_a[i * SIMD_LENGTH]);
      const SimdVector input_b_temp = _mm_loadu_ps(&input_b[i * SIMD_LENGTH]);
      const SimdVector output_temp = SIMD_MULTIPLY(input_a_temp, input_b_temp);
      _mm_storeu_ps(&output[i * SIMD_LENGTH], output_temp);
    }
  }
#else
  for (size_t i = 0; i < GetNumChunks(length); ++i) {
    output_vector[i] = SIMD_MULTIPLY(input_a_vector[i], input_b_vector[i]);
  }
#endif  // SIMD_SSE

  // Multiply samples at the end that were missed by the SIMD chunking.
  const size_t leftover_samples = GetLeftoverSamples(length);
  DCHECK_GE(length, leftover_samples);
  for (size_t i = length - leftover_samples; i < length; ++i) {
    output[i] = input_a[i] * input_b[i];
  }
}

// Pointwise multiply-accumulate: accumulator[i] += input_a[i] * input_b[i].
// The four SSE branches cover each combination of aligned/unaligned inputs
// and accumulator, falling back to unaligned load/store where needed.
void MultiplyAndAccumulatePointwise(size_t length, const float* input_a,
                                    const float* input_b, float* accumulator) {
  DCHECK(input_a);
  DCHECK(input_b);
  DCHECK(accumulator);

  const SimdVector* input_a_vector =
      reinterpret_cast<const SimdVector*>(input_a);
  const SimdVector* input_b_vector =
      reinterpret_cast<const SimdVector*>(input_b);
  SimdVector* accumulator_vector = reinterpret_cast<SimdVector*>(accumulator);

#ifdef SIMD_SSE
  const size_t num_chunks = GetNumChunks(length);
  const bool inputs_aligned = IsAligned(input_a) && IsAligned(input_b);
  const bool accumulator_aligned = IsAligned(accumulator);
  if (inputs_aligned && accumulator_aligned) {
    for (size_t i = 0; i < num_chunks; ++i) {
      accumulator_vector[i] = SIMD_MULTIPLY_ADD(
          input_a_vector[i], input_b_vector[i], accumulator_vector[i]);
    }
  } else if (inputs_aligned) {
    for (size_t i = 0; i < num_chunks; ++i) {
      SimdVector accumulator_temp = _mm_loadu_ps(&accumulator[i * SIMD_LENGTH]);
      accumulator_temp = SIMD_MULTIPLY_ADD(input_a_vector[i], input_b_vector[i],
                                           accumulator_temp);
      _mm_storeu_ps(&accumulator[i * SIMD_LENGTH], accumulator_temp);
    }
  } else if (accumulator_aligned) {
    for (size_t i = 0; i < num_chunks; ++i) {
      const SimdVector input_a_temp = _mm_loadu_ps(&input_a[i * SIMD_LENGTH]);
      const SimdVector input_b_temp = _mm_loadu_ps(&input_b[i * SIMD_LENGTH]);
      accumulator_vector[i] =
          SIMD_MULTIPLY_ADD(input_a_temp, input_b_temp, accumulator_vector[i]);
    }
  } else {
    for (size_t i = 0; i < num_chunks; ++i) {
      const SimdVector input_a_temp = _mm_loadu_ps(&input_a[i * SIMD_LENGTH]);
      const SimdVector input_b_temp = _mm_loadu_ps(&input_b[i * SIMD_LENGTH]);
      SimdVector accumulator_temp = _mm_loadu_ps(&accumulator[i * SIMD_LENGTH]);
      accumulator_temp =
          SIMD_MULTIPLY_ADD(input_a_temp, input_b_temp, accumulator_temp);
      _mm_storeu_ps(&accumulator[i * SIMD_LENGTH], accumulator_temp);
    }
  }
#else
  for (size_t i = 0; i < GetNumChunks(length); ++i) {
    accumulator_vector[i] = SIMD_MULTIPLY_ADD(
        input_a_vector[i], input_b_vector[i], accumulator_vector[i]);
  }
#endif  // SIMD_SSE

  // Apply gain and accumulate to samples at the end that were missed by the
  // SIMD chunking.
  const size_t leftover_samples = GetLeftoverSamples(length);
  DCHECK_GE(length, leftover_samples);
  for (size_t i = length - leftover_samples; i < length; ++i) {
    accumulator[i] += input_a[i] * input_b[i];
  }
}

// Scalar gain: output[i] = gain * input[i].
void ScalarMultiply(size_t length, float gain, const float* input,
                    float* output) {
  DCHECK(input);
  DCHECK(output);

  const SimdVector* input_vector = reinterpret_cast<const SimdVector*>(input);
  SimdVector* output_vector = reinterpret_cast<SimdVector*>(output);

  // Broadcast |gain| into all lanes of a SIMD vector.
  const SimdVector gain_vector = SIMD_LOAD_ONE_FLOAT(gain);
#ifdef SIMD_SSE
  const size_t num_chunks = GetNumChunks(length);
  const bool input_aligned = IsAligned(input);
  const bool output_aligned = IsAligned(output);
  if (input_aligned && output_aligned) {
    for (size_t i = 0; i < num_chunks; ++i) {
      output_vector[i] = SIMD_MULTIPLY(gain_vector, input_vector[i]);
    }
  } else if (input_aligned) {
    for (size_t i = 0; i < num_chunks; ++i) {
      const SimdVector output_temp =
          SIMD_MULTIPLY(gain_vector, input_vector[i]);
      _mm_storeu_ps(&output[i * SIMD_LENGTH], output_temp);
    }
  } else if (output_aligned) {
    for (size_t i = 0; i < num_chunks; ++i) {
      const SimdVector input_temp = _mm_loadu_ps(&input[i * SIMD_LENGTH]);
      output_vector[i] = SIMD_MULTIPLY(gain_vector, input_temp);
    }
  } else {
    for (size_t i = 0; i < num_chunks; ++i) {
      const SimdVector input_temp = _mm_loadu_ps(&input[i * SIMD_LENGTH]);
      const SimdVector output_temp = SIMD_MULTIPLY(gain_vector, input_temp);
      _mm_storeu_ps(&output[i * SIMD_LENGTH], output_temp);
    }
  }
#else
  for (size_t i = 0; i < GetNumChunks(length); ++i) {
    output_vector[i] = SIMD_MULTIPLY(gain_vector, input_vector[i]);
  }
#endif  // SIMD_SSE

  // Apply gain to samples at the end that were missed by the SIMD chunking.
  const size_t leftover_samples = GetLeftoverSamples(length);
  DCHECK_GE(length, leftover_samples);
  for (size_t i = length - leftover_samples; i < length; ++i) {
    output[i] = input[i] * gain;
  }
}

// Scalar gain with accumulation: accumulator[i] += gain * input[i].
void ScalarMultiplyAndAccumulate(size_t length, float gain, const float* input,
                                 float* accumulator) {
  DCHECK(input);
  DCHECK(accumulator);

  const SimdVector* input_vector = reinterpret_cast<const SimdVector*>(input);
  SimdVector* accumulator_vector = reinterpret_cast<SimdVector*>(accumulator);

  // Broadcast |gain| into all lanes of a SIMD vector.
  const SimdVector gain_vector = SIMD_LOAD_ONE_FLOAT(gain);
#ifdef SIMD_SSE
  const size_t num_chunks = GetNumChunks(length);
  const bool input_aligned = IsAligned(input);
  const bool accumulator_aligned = IsAligned(accumulator);
  if (input_aligned && accumulator_aligned) {
    for (size_t i = 0; i < num_chunks; ++i) {
      accumulator_vector[i] = SIMD_MULTIPLY_ADD(gain_vector, input_vector[i],
                                                accumulator_vector[i]);
    }
  } else if (input_aligned) {
    for (size_t i = 0; i < num_chunks; ++i) {
      SimdVector accumulator_temp = _mm_loadu_ps(&accumulator[i * SIMD_LENGTH]);
      accumulator_temp =
          SIMD_MULTIPLY_ADD(gain_vector, input_vector[i], accumulator_temp);
      _mm_storeu_ps(&accumulator[i * SIMD_LENGTH], accumulator_temp);
    }
  } else if (accumulator_aligned) {
    for (size_t i = 0; i < num_chunks; ++i) {
      const SimdVector input_temp = _mm_loadu_ps(&input[i * SIMD_LENGTH]);
      accumulator_vector[i] =
          SIMD_MULTIPLY_ADD(gain_vector, input_temp, accumulator_vector[i]);
    }
  } else {
    for (size_t i = 0; i < num_chunks; ++i) {
      const SimdVector input_temp = _mm_loadu_ps(&input[i * SIMD_LENGTH]);
      SimdVector accumulator_temp = _mm_loadu_ps(&accumulator[i * SIMD_LENGTH]);
      accumulator_temp =
          SIMD_MULTIPLY_ADD(gain_vector, input_temp, accumulator_temp);
      _mm_storeu_ps(&accumulator[i * SIMD_LENGTH], accumulator_temp);
    }
  }
#else
  for (size_t i = 0; i < GetNumChunks(length); ++i) {
    accumulator_vector[i] =
        SIMD_MULTIPLY_ADD(gain_vector, input_vector[i],
                          accumulator_vector[i]);
  }
#endif  // SIMD_SSE

  // Apply gain and accumulate to samples at the end that were missed by the
  // SIMD chunking.
  const size_t leftover_samples = GetLeftoverSamples(length);
  DCHECK_GE(length, leftover_samples);
  for (size_t i = length - leftover_samples; i < length; ++i) {
    accumulator[i] += input[i] * gain;
  }
}

// Per-sample approximate reciprocal square root:
// output[i] ~= 1 / sqrt(input[i]).
void ReciprocalSqrt(size_t length, const float* input, float* output) {
  DCHECK(input);
  DCHECK(output);

#if !defined(SIMD_DISABLED)
  const SimdVector* input_vector = reinterpret_cast<const SimdVector*>(input);
  SimdVector* output_vector = reinterpret_cast<SimdVector*>(output);
#endif  // !defined(SIMD_DISABLED)

#ifdef SIMD_SSE
  const size_t num_chunks = GetNumChunks(length);
  const bool input_aligned = IsAligned(input);
  const bool output_aligned = IsAligned(output);
  if (input_aligned && output_aligned) {
    for (size_t i = 0; i < num_chunks; ++i) {
      output_vector[i] = SIMD_RECIPROCAL_SQRT(input_vector[i]);
    }
  } else if (input_aligned) {
    for (size_t i = 0; i < num_chunks; ++i) {
      const SimdVector output_temp = SIMD_RECIPROCAL_SQRT(input_vector[i]);
      _mm_storeu_ps(&output[i * SIMD_LENGTH], output_temp);
    }
  } else if (output_aligned) {
    for (size_t i = 0; i < num_chunks; ++i) {
      const SimdVector input_temp = _mm_loadu_ps(&input[i * SIMD_LENGTH]);
      output_vector[i] = SIMD_RECIPROCAL_SQRT(input_temp);
    }
  } else {
    for (size_t i = 0; i < num_chunks; ++i) {
      const SimdVector input_temp = _mm_loadu_ps(&input[i * SIMD_LENGTH]);
      const SimdVector output_temp = SIMD_RECIPROCAL_SQRT(input_temp);
      _mm_storeu_ps(&output[i * SIMD_LENGTH], output_temp);
    }
  }
#elif defined SIMD_NEON
  for (size_t i = 0; i < GetNumChunks(length); ++i) {
    output_vector[i] = SIMD_RECIPROCAL_SQRT(input_vector[i]);
  }
#endif  // SIMD_SSE

  // Apply to samples at the end that were missed by the SIMD chunking.
+ const size_t leftover_samples = GetLeftoverSamples(length); + DCHECK_GE(length, leftover_samples); + for (size_t i = length - leftover_samples; i < length; ++i) { + output[i] = FastReciprocalSqrt(input[i]); + } +} + +void Sqrt(size_t length, const float* input, float* output) { + DCHECK(input); + DCHECK(output); + +#if !defined(SIMD_DISABLED) + const SimdVector* input_vector = reinterpret_cast<const SimdVector*>(input); + SimdVector* output_vector = reinterpret_cast<SimdVector*>(output); +#endif // !defined(SIMD_DISABLED) + +#ifdef SIMD_SSE + const size_t num_chunks = GetNumChunks(length); + const bool input_aligned = IsAligned(input); + const bool output_aligned = IsAligned(output); + if (input_aligned && output_aligned) { + for (size_t i = 0; i < num_chunks; ++i) { + output_vector[i] = SIMD_SQRT(input_vector[i]); + } + } else if (input_aligned) { + for (size_t i = 0; i < num_chunks; ++i) { + const SimdVector output_temp = SIMD_SQRT(input_vector[i]); + _mm_storeu_ps(&output[i * SIMD_LENGTH], output_temp); + } + } else if (output_aligned) { + for (size_t i = 0; i < num_chunks; ++i) { + const SimdVector input_temp = _mm_loadu_ps(&input[i * SIMD_LENGTH]); + output_vector[i] = SIMD_SQRT(input_temp); + } + } else { + for (size_t i = 0; i < num_chunks; ++i) { + const SimdVector input_temp = _mm_loadu_ps(&input[i * SIMD_LENGTH]); + const SimdVector output_temp = SIMD_SQRT(input_temp); + _mm_storeu_ps(&output[i * SIMD_LENGTH], output_temp); + } + } +#elif defined SIMD_NEON + for (size_t i = 0; i < GetNumChunks(length); ++i) { + // This should be faster than using a sqrt method : https://goo.gl/XRKwFp + output_vector[i] = SIMD_SQRT(input_vector[i]); + } +#endif // SIMD_SSE + + // Apply to samples at the end that were missed by the SIMD chunking. 
+ const size_t leftover_samples = GetLeftoverSamples(length); + DCHECK_GE(length, leftover_samples); + for (size_t i = length - leftover_samples; i < length; ++i) { + output[i] = 1.0f / FastReciprocalSqrt(input[i]); + } +} + +void ApproxComplexMagnitude(size_t length, const float* input, float* output) { + DCHECK(input); + DCHECK(output); + +#if !defined(SIMD_DISABLED) + const SimdVector* input_vector = reinterpret_cast<const SimdVector*>(input); + SimdVector* output_vector = reinterpret_cast<SimdVector*>(output); + const size_t num_chunks = GetNumChunks(length); + const bool input_aligned = IsAligned(input); + const bool output_aligned = IsAligned(output); +#endif // !defined(SIMD_DISABLED) + +#ifdef SIMD_SSE + if (input_aligned && output_aligned) { + for (size_t out_index = 0; out_index < num_chunks; ++out_index) { + const size_t first_index = out_index * 2; + const size_t second_index = first_index + 1; + const SimdVector squared_1 = + SIMD_MULTIPLY(input_vector[first_index], input_vector[first_index]); + const SimdVector squared_2 = + SIMD_MULTIPLY(input_vector[second_index], input_vector[second_index]); + const SimdVector unshuffled_1 = + _mm_shuffle_ps(squared_1, squared_2, _MM_SHUFFLE(2, 0, 2, 0)); + const SimdVector unshuffled_2 = + _mm_shuffle_ps(squared_1, squared_2, _MM_SHUFFLE(3, 1, 3, 1)); + output_vector[out_index] = SIMD_ADD(unshuffled_1, unshuffled_2); + output_vector[out_index] = SIMD_SQRT(output_vector[out_index]); + } + } else if (input_aligned) { + for (size_t out_index = 0; out_index < num_chunks; ++out_index) { + const size_t first_index = out_index * 2; + const size_t second_index = first_index + 1; + const SimdVector squared_1 = + SIMD_MULTIPLY(input_vector[first_index], input_vector[first_index]); + const SimdVector squared_2 = + SIMD_MULTIPLY(input_vector[second_index], input_vector[second_index]); + const SimdVector unshuffled_1 = + _mm_shuffle_ps(squared_1, squared_2, _MM_SHUFFLE(2, 0, 2, 0)); + const SimdVector unshuffled_2 = + 
_mm_shuffle_ps(squared_1, squared_2, _MM_SHUFFLE(3, 1, 3, 1)); + SimdVector output_temp = SIMD_ADD(unshuffled_1, unshuffled_2); + output_vector[out_index] = SIMD_SQRT(output_temp); + _mm_storeu_ps(&output[out_index * SIMD_LENGTH], output_temp); + } + } else if (output_aligned) { + for (size_t out_index = 0; out_index < num_chunks; ++out_index) { + const size_t first_index = out_index * 2; + const size_t second_index = first_index + 1; + const SimdVector first_temp = + _mm_loadu_ps(&input[first_index * SIMD_LENGTH]); + const SimdVector second_temp = + _mm_loadu_ps(&input[second_index * SIMD_LENGTH]); + const SimdVector squared_1 = SIMD_MULTIPLY(first_temp, first_temp); + const SimdVector squared_2 = SIMD_MULTIPLY(second_temp, second_temp); + const SimdVector unshuffled_1 = + _mm_shuffle_ps(squared_1, squared_2, _MM_SHUFFLE(2, 0, 2, 0)); + const SimdVector unshuffled_2 = + _mm_shuffle_ps(squared_1, squared_2, _MM_SHUFFLE(3, 1, 3, 1)); + output_vector[out_index] = SIMD_ADD(unshuffled_1, unshuffled_2); + output_vector[out_index] = SIMD_SQRT(output_vector[out_index]); + } + } else { + for (size_t out_index = 0; out_index < num_chunks; ++out_index) { + const size_t first_index = out_index * 2; + const size_t second_index = first_index + 1; + const SimdVector first_temp = + _mm_loadu_ps(&input[first_index * SIMD_LENGTH]); + const SimdVector second_temp = + _mm_loadu_ps(&input[second_index * SIMD_LENGTH]); + const SimdVector squared_1 = SIMD_MULTIPLY(first_temp, first_temp); + const SimdVector squared_2 = SIMD_MULTIPLY(second_temp, second_temp); + const SimdVector unshuffled_1 = + _mm_shuffle_ps(squared_1, squared_2, _MM_SHUFFLE(2, 0, 2, 0)); + const SimdVector unshuffled_2 = + _mm_shuffle_ps(squared_1, squared_2, _MM_SHUFFLE(3, 1, 3, 1)); + SimdVector output_temp = SIMD_ADD(unshuffled_1, unshuffled_2); + output_temp = SIMD_SQRT(output_temp); + _mm_storeu_ps(&output[out_index * SIMD_LENGTH], output_temp); + } + } +#elif defined SIMD_NEON + if (input_aligned && 
output_aligned) { + for (size_t out_index = 0; out_index < num_chunks; ++out_index) { + const size_t first_index = out_index * 2; + const size_t second_index = first_index + 1; + const SimdVector squared_1 = + SIMD_MULTIPLY(input_vector[first_index], input_vector[first_index]); + const SimdVector squared_2 = + SIMD_MULTIPLY(input_vector[second_index], input_vector[second_index]); + const float32x4x2_t unshuffled = vuzpq_f32(squared_1, squared_2); + output_vector[out_index] = SIMD_ADD(unshuffled.val[0], unshuffled.val[1]); + output_vector[out_index] = SIMD_SQRT(output_vector[out_index]); + } + } else if (input_aligned) { + for (size_t out_index = 0; out_index < num_chunks; ++out_index) { + const size_t first_index = out_index * 2; + const size_t second_index = first_index + 1; + const SimdVector squared_1 = + SIMD_MULTIPLY(input_vector[first_index], input_vector[first_index]); + const SimdVector squared_2 = + SIMD_MULTIPLY(input_vector[second_index], input_vector[second_index]); + const float32x4x2_t unshuffled = vuzpq_f32(squared_1, squared_2); + SimdVector output_temp = SIMD_ADD(unshuffled.val[0], unshuffled.val[1]); + output_temp = SIMD_SQRT(output_temp); + vst1q_f32(&output[out_index * SIMD_LENGTH], output_temp); + } + } else if (output_aligned) { + for (size_t out_index = 0; out_index < num_chunks; ++out_index) { + const size_t first_index = out_index * 2; + const size_t second_index = first_index + 1; + const SimdVector first_temp = + vld1q_f32(&input[first_index * SIMD_LENGTH]); + const SimdVector second_temp = + vld1q_f32(&input[second_index * SIMD_LENGTH]); + const SimdVector squared_1 = SIMD_MULTIPLY(first_temp, first_temp); + const SimdVector squared_2 = SIMD_MULTIPLY(second_temp, second_temp); + const float32x4x2_t unshuffled = vuzpq_f32(squared_1, squared_2); + output_vector[out_index] = SIMD_ADD(unshuffled.val[0], unshuffled.val[1]); + output_vector[out_index] = SIMD_SQRT(output_vector[out_index]); + } + } else { + for (size_t out_index = 0; out_index 
< num_chunks; ++out_index) { + const size_t first_index = out_index * 2; + const size_t second_index = first_index + 1; + const SimdVector first_temp = + vld1q_f32(&input[first_index * SIMD_LENGTH]); + const SimdVector second_temp = + vld1q_f32(&input[second_index * SIMD_LENGTH]); + const SimdVector squared_1 = SIMD_MULTIPLY(first_temp, first_temp); + const SimdVector squared_2 = SIMD_MULTIPLY(second_temp, second_temp); + const float32x4x2_t unshuffled = vuzpq_f32(squared_1, squared_2); + SimdVector output_temp = SIMD_ADD(unshuffled.val[0], unshuffled.val[1]); + output_temp = SIMD_SQRT(output_temp); + vst1q_f32(&output[out_index * SIMD_LENGTH], output_temp); + } + } +#endif // SIMD_SSE + + // Apply to samples at the end that were missed by the SIMD chunking. + const size_t leftover_samples = GetLeftoverSamples(length); + DCHECK_GE(length, leftover_samples); + for (size_t i = length - leftover_samples; i < length; ++i) { + const size_t real_index = i * 2; + const size_t imag_index = real_index + 1; + const float squared_sum = (input[real_index] * input[real_index]) + + (input[imag_index] * input[imag_index]); + output[i] = 1.0f / FastReciprocalSqrt(squared_sum); + } +} + +void ComplexInterleavedFormatFromMagnitudeAndSinCosPhase( + size_t length, const float* magnitude, const float* cos_phase, + const float* sin_phase, float* complex_interleaved_format_output) { + size_t leftover_samples = 0; +#ifdef SIMD_NEON + if (IsAligned(complex_interleaved_format_output) && IsAligned(cos_phase) && + IsAligned(sin_phase) && IsAligned(magnitude)) { + const SimdVector* cos_vec = reinterpret_cast<const SimdVector*>(cos_phase); + const SimdVector* sin_vec = reinterpret_cast<const SimdVector*>(sin_phase); + const SimdVector* magnitude_vec = + reinterpret_cast<const SimdVector*>(magnitude); + + const size_t num_chunks = GetNumChunks(length); + float32x4x2_t interleaved_pair; + + SimdVector* interleaved_vec = + reinterpret_cast<SimdVector*>(complex_interleaved_format_output); + for 
(size_t i = 0, j = 0; j < num_chunks; ++i, j += 2) { + interleaved_pair = vzipq_f32(cos_vec[i], sin_vec[i]); + interleaved_vec[j] = + SIMD_MULTIPLY(interleaved_pair.val[0], magnitude_vec[i]); + interleaved_vec[j + 1] = + SIMD_MULTIPLY(interleaved_pair.val[1], magnitude_vec[i]); + } + + leftover_samples = GetLeftoverSamples(length); + } +#endif // SIMD_NEON + DCHECK_EQ(leftover_samples % 2U, 0U); + for (size_t i = leftover_samples, j = leftover_samples / 2; i < length; + i += 2, ++j) { + const size_t imaginary_offset = i + 1; + complex_interleaved_format_output[i] = magnitude[j] * cos_phase[j]; + complex_interleaved_format_output[imaginary_offset] = + magnitude[j] * sin_phase[j]; + } +} + +void StereoFromMonoSimd(size_t length, const float* mono, float* left, + float* right) { + ScalarMultiply(length, kInverseSqrtTwo, mono, left); + std::copy_n(left, length, right); +} + +void MonoFromStereoSimd(size_t length, const float* left, const float* right, + float* mono) { + DCHECK(left); + DCHECK(right); + DCHECK(mono); + + const SimdVector* left_vector = reinterpret_cast<const SimdVector*>(left); + const SimdVector* right_vector = reinterpret_cast<const SimdVector*>(right); + SimdVector* mono_vector = reinterpret_cast<SimdVector*>(mono); + + const SimdVector inv_root_two_vec = SIMD_LOAD_ONE_FLOAT(kInverseSqrtTwo); +#ifdef SIMD_SSE + const size_t num_chunks = GetNumChunks(length); + const bool inputs_aligned = IsAligned(left) && IsAligned(right); + const bool mono_aligned = IsAligned(mono); + if (inputs_aligned && mono_aligned) { + for (size_t i = 0; i < num_chunks; ++i) { + mono_vector[i] = SIMD_MULTIPLY(inv_root_two_vec, + SIMD_ADD(left_vector[i], right_vector[i])); + } + } else if (inputs_aligned) { + for (size_t i = 0; i < num_chunks; ++i) { + const SimdVector mono_temp = SIMD_MULTIPLY( + inv_root_two_vec, SIMD_ADD(left_vector[i], right_vector[i])); + _mm_storeu_ps(&mono[i * SIMD_LENGTH], mono_temp); + } + } else if (mono_aligned) { + for (size_t i = 0; i < num_chunks; 
++i) { + const SimdVector left_temp = _mm_loadu_ps(&left[i * SIMD_LENGTH]); + const SimdVector right_temp = _mm_loadu_ps(&right[i * SIMD_LENGTH]); + mono_vector[i] = + SIMD_MULTIPLY(inv_root_two_vec, SIMD_ADD(left_temp, right_temp)); + } + } else { + for (size_t i = 0; i < num_chunks; ++i) { + const SimdVector left_temp = _mm_loadu_ps(&left[i * SIMD_LENGTH]); + const SimdVector right_temp = _mm_loadu_ps(&right[i * SIMD_LENGTH]); + const SimdVector mono_temp = + SIMD_MULTIPLY(inv_root_two_vec, SIMD_ADD(left_temp, right_temp)); + _mm_storeu_ps(&mono[i * SIMD_LENGTH], mono_temp); + } + } +#else + for (size_t i = 0; i < GetNumChunks(length); ++i) { + mono_vector[i] = SIMD_MULTIPLY(inv_root_two_vec, + SIMD_ADD(left_vector[i], right_vector[i])); + } +#endif // SIMD_SSE + const size_t leftover_samples = GetLeftoverSamples(length); + // Downmix samples at the end that were missed by the SIMD chunking. + DCHECK_GE(length, leftover_samples); + for (size_t i = length - leftover_samples; i < length; ++i) { + mono[i] = kInverseSqrtTwo * (left[i] + right[i]); + } +} + +#ifdef SIMD_NEON + +void Int16FromFloat(size_t length, const float* input, int16_t* output) { + DCHECK(input); + DCHECK(output); + + // if (input_aligned || output_aligned) { + const SimdVector* input_vector = reinterpret_cast<const SimdVector*>(input); + int16x4_t* output_vector = reinterpret_cast<int16x4_t*>(output); + + // A temporary 32 bit integer vector is needed as we only have intrinsics to + // convert from 32 bit floats to 32 bit ints. Then truncate to 16 bit ints. + int32x4_t temporary_wide_vector; + SimdVector temporary_float_vector; + + const SimdVector scaling_vector = SIMD_LOAD_ONE_FLOAT(kInt16FromFloat); + + for (size_t i = 0; i < GetNumChunks(length); ++i) { + temporary_float_vector = SIMD_MULTIPLY(scaling_vector, input_vector[i]); + temporary_wide_vector = vcvtq_s32_f32(temporary_float_vector); + output_vector[i] = vqmovn_s32(temporary_wide_vector); + } + + // The remainder. 
+ const size_t leftover_samples = GetLeftoverSamples(length); + DCHECK_GE(length, leftover_samples); + float temp_float; + for (size_t i = length - leftover_samples; i < length; ++i) { + temp_float = input[i] * kInt16FromFloat; + temp_float = std::min(kInt16Max, std::max(kInt16Min, temp_float)); + output[i] = static_cast<int16_t>(temp_float); + } +} + +void FloatFromInt16(size_t length, const int16_t* input, float* output) { + DCHECK(input); + DCHECK(output); + + size_t leftover_samples = length; + const bool input_aligned = IsAligned(input); + const bool output_aligned = IsAligned(output); + if (input_aligned || output_aligned) { + const int16x4_t* input_vector = reinterpret_cast<const int16x4_t*>(input); + SimdVector* output_vector = reinterpret_cast<SimdVector*>(output); + + int16x4_t temporary_narrow_vector; + SimdVector temporary_float_vector; + int32x4_t temporary_wide_vector; + const SimdVector scaling_vector = SIMD_LOAD_ONE_FLOAT(kFloatFromInt16); + + if (input_aligned && output_aligned) { + for (size_t i = 0; i < GetNumChunks(length); ++i) { + temporary_wide_vector = vmovl_s16(input_vector[i]); + output_vector[i] = vcvtq_f32_s32(temporary_wide_vector); + output_vector[i] = SIMD_MULTIPLY(scaling_vector, output_vector[i]); + } + } else if (input_aligned) { + for (size_t i = 0; i < GetNumChunks(length); ++i) { + temporary_wide_vector = vmovl_s16(input_vector[i]); + temporary_float_vector = vcvtq_f32_s32(temporary_wide_vector); + temporary_float_vector = + SIMD_MULTIPLY(scaling_vector, temporary_float_vector); + vst1q_f32(&output[i * SIMD_LENGTH], temporary_float_vector); + } + } else { + for (size_t i = 0; i < GetNumChunks(length); ++i) { + temporary_narrow_vector = vld1_s16(&input[i * SIMD_LENGTH]); + temporary_wide_vector = vmovl_s16(temporary_narrow_vector); + output_vector[i] = vcvtq_f32_s32(temporary_wide_vector); + output_vector[i] = SIMD_MULTIPLY(scaling_vector, output_vector[i]); + } + } + leftover_samples = GetLeftoverSamples(length); + } + + // The 
remainder. + for (size_t i = length - leftover_samples; i < length; ++i) { + output[i] = static_cast<float>(input[i]) * kFloatFromInt16; + } +} + +#elif (defined SIMD_SSE && !defined(_MSC_VER)) + +void Int16FromFloat(size_t length, const float* input, int16_t* output) { + DCHECK(input); + DCHECK(output); + + size_t leftover_samples = length; + const bool input_aligned = IsAligned(input); + const bool output_aligned = IsAligned(output); + if (output_aligned) { + const SimdVector* input_vector = reinterpret_cast<const SimdVector*>(input); + __m64* output_vector = reinterpret_cast<__m64*>(output); + + const SimdVector scaling_vector = SIMD_LOAD_ONE_FLOAT(kInt16FromFloat); + const SimdVector min_vector = SIMD_LOAD_ONE_FLOAT(kInt16Min); + const SimdVector max_vector = SIMD_LOAD_ONE_FLOAT(kInt16Max); + + SimdVector temporary_float_vector; + + if (input_aligned) { + for (size_t i = 0; i < GetNumChunks(length); ++i) { + temporary_float_vector = SIMD_MULTIPLY(scaling_vector, input_vector[i]); + temporary_float_vector = _mm_max_ps(temporary_float_vector, min_vector); + temporary_float_vector = _mm_min_ps(temporary_float_vector, max_vector); + output_vector[i] = _mm_cvtps_pi16(temporary_float_vector); + } + } else { + for (size_t i = 0; i < GetNumChunks(length); ++i) { + temporary_float_vector = _mm_loadu_ps(&input[i * SIMD_LENGTH]); + temporary_float_vector = + SIMD_MULTIPLY(scaling_vector, temporary_float_vector); + temporary_float_vector = _mm_max_ps(temporary_float_vector, min_vector); + temporary_float_vector = _mm_min_ps(temporary_float_vector, max_vector); + output_vector[i] = _mm_cvtps_pi16(temporary_float_vector); + } + } + // There is no easy way to simply store the 16 bit ints so we dont have an + // |input_aligned| only case. + leftover_samples = GetLeftoverSamples(length); + } + + // The remainder. 
+ float temp_float; + for (size_t i = length - GetLeftoverSamples(length); i < length; ++i) { + temp_float = input[i] * kInt16FromFloat; + temp_float = std::min(kInt16Max, std::max(kInt16Min, temp_float)); + output[i] = static_cast<int16_t>(temp_float); + } +} + +void FloatFromInt16(size_t length, const int16_t* input, float* output) { + DCHECK(input); + DCHECK(output); + + size_t leftover_samples = length; + const bool input_aligned = IsAligned(input); + const bool output_aligned = IsAligned(output); + if (input_aligned) { + SimdVector* output_vector = reinterpret_cast<SimdVector*>(output); + const __m64* input_vector = reinterpret_cast<const __m64*>(input); + + const SimdVector scaling_vector = SIMD_LOAD_ONE_FLOAT(kFloatFromInt16); + + if (output_aligned) { + for (size_t i = 0; i < GetNumChunks(length); ++i) { + output_vector[i] = _mm_cvtpi16_ps(input_vector[i]); + output_vector[i] = SIMD_MULTIPLY(scaling_vector, output_vector[i]); + } + } else { + SimdVector temporary_float_vector; + for (size_t i = 0; i < GetNumChunks(length); ++i) { + temporary_float_vector = _mm_cvtpi16_ps(input_vector[i]); + temporary_float_vector = + SIMD_MULTIPLY(scaling_vector, temporary_float_vector); + _mm_storeu_ps(&output[i * SIMD_LENGTH], temporary_float_vector); + } + } + // There is no easy way to simply load the 16 bit ints so we dont have an + // |output_aligned| only case. + leftover_samples = GetLeftoverSamples(length); + } + + // The remainder. + for (size_t i = length - leftover_samples; i < length; ++i) { + output[i] = static_cast<float>(input[i]) * kFloatFromInt16; + } +} + +#else // SIMD disabled or Windows build. 
+ +void Int16FromFloat(size_t length, const float* input, int16_t* output) { + DCHECK(input); + DCHECK(output); + + float temp_float; + for (size_t i = 0; i < length; ++i) { + temp_float = input[i] * kInt16FromFloat; + temp_float = std::min(kInt16Max, std::max(kInt16Min, temp_float)); + output[i] = static_cast<int16_t>(temp_float); + } +} + +void FloatFromInt16(size_t length, const int16_t* input, float* output) { + DCHECK(input); + DCHECK(output); + + for (size_t i = 0; i < length; ++i) { + output[i] = static_cast<float>(input[i]) * kFloatFromInt16; + } +} + +#endif // SIMD_NEON + +void InterleaveStereo(size_t length, const int16_t* channel_0, + const int16_t* channel_1, int16_t* interleaved_buffer) { + DCHECK(interleaved_buffer); + DCHECK(channel_0); + DCHECK(channel_1); + + size_t leftover_samples = length; +#ifdef SIMD_NEON + if (IsAligned(interleaved_buffer) && IsAligned(channel_0) && + IsAligned(channel_1)) { + const int16x8_t* channel_0_vec = + reinterpret_cast<const int16x8_t*>(channel_0); + const int16x8_t* channel_1_vec = + reinterpret_cast<const int16x8_t*>(channel_1); + + const size_t num_chunks = length / kSixteenBitSimdLength; + int16x8x2_t interleaved_pair; + + int16x8_t* interleaved_vec = + reinterpret_cast<int16x8_t*>(interleaved_buffer); + for (size_t i = 0, j = 0; i < num_chunks; ++i, j += 2) { + interleaved_pair = vzipq_s16(channel_0_vec[i], channel_1_vec[i]); + interleaved_vec[j] = interleaved_pair.val[0]; + interleaved_vec[j + 1] = interleaved_pair.val[1]; + } + + leftover_samples = length % kSixteenBitSimdLength; + } +#endif // SIMD_NEON + for (size_t i = length - leftover_samples; i < length; ++i) { + const size_t interleaved_index = kNumStereoChannels * i; + interleaved_buffer[interleaved_index] = channel_0[i]; + interleaved_buffer[interleaved_index + 1] = channel_1[i]; + } +} + +void InterleaveStereo(size_t length, const float* channel_0, + const float* channel_1, float* interleaved_buffer) { + DCHECK(interleaved_buffer); + 
DCHECK(channel_0); + DCHECK(channel_1); + + size_t leftover_samples = length; +#ifdef SIMD_NEON + if (IsAligned(interleaved_buffer) && IsAligned(channel_0) && + IsAligned(channel_1)) { + const SimdVector* channel_0_vec = + reinterpret_cast<const SimdVector*>(channel_0); + const SimdVector* channel_1_vec = + reinterpret_cast<const SimdVector*>(channel_1); + + const size_t num_chunks = GetNumChunks(length); + float32x4x2_t interleaved_pair; + + SimdVector* interleaved_vec = + reinterpret_cast<SimdVector*>(interleaved_buffer); + for (size_t i = 0, j = 0; i < num_chunks; ++i, j += 2) { + interleaved_pair = vzipq_f32(channel_0_vec[i], channel_1_vec[i]); + interleaved_vec[j] = interleaved_pair.val[0]; + interleaved_vec[j + 1] = interleaved_pair.val[1]; + } + + leftover_samples = GetLeftoverSamples(length); + } +#endif // SIMD_NEON + for (size_t i = length - leftover_samples; i < length; ++i) { + const size_t interleaved_index = kNumStereoChannels * i; + interleaved_buffer[interleaved_index] = channel_0[i]; + interleaved_buffer[interleaved_index + 1] = channel_1[i]; + } +} + +void InterleaveStereo(size_t length, const float* channel_0, + const float* channel_1, int16_t* interleaved_buffer) { + DCHECK(interleaved_buffer); + DCHECK(channel_0); + DCHECK(channel_1); + + size_t leftover_samples = length; +#ifdef SIMD_NEON + if (IsAligned(interleaved_buffer) && IsAligned(channel_0) && + IsAligned(channel_1)) { + const SimdVector* channel_0_vec = + reinterpret_cast<const SimdVector*>(channel_0); + const SimdVector* channel_1_vec = + reinterpret_cast<const SimdVector*>(channel_1); + + const size_t num_chunks = GetNumChunks(length); + float32x4x2_t interleaved_pair; + int32x4_t temporary_wide_vector; + + const SimdVector scaling_vector = SIMD_LOAD_ONE_FLOAT(kInt16FromFloat); + const SimdVector min_vector = SIMD_LOAD_ONE_FLOAT(kInt16Min); + const SimdVector max_vector = SIMD_LOAD_ONE_FLOAT(kInt16Max); + + int16x4_t* interleaved_vec = + 
reinterpret_cast<int16x4_t*>(interleaved_buffer); + for (size_t i = 0; i < num_chunks; ++i) { + const size_t interleaved_index = kNumStereoChannels * i; + interleaved_pair = vzipq_f32(channel_0_vec[i], channel_1_vec[i]); + interleaved_pair.val[0] = + SIMD_MULTIPLY(scaling_vector, interleaved_pair.val[0]); + interleaved_pair.val[0] = vmaxq_f32(interleaved_pair.val[0], min_vector); + interleaved_pair.val[0] = vminq_f32(interleaved_pair.val[0], max_vector); + temporary_wide_vector = vcvtq_s32_f32(interleaved_pair.val[0]); + interleaved_vec[interleaved_index] = vqmovn_s32(temporary_wide_vector); + interleaved_pair.val[1] = + SIMD_MULTIPLY(scaling_vector, interleaved_pair.val[1]); + interleaved_pair.val[1] = vmaxq_f32(interleaved_pair.val[1], min_vector); + interleaved_pair.val[1] = vminq_f32(interleaved_pair.val[1], max_vector); + temporary_wide_vector = vcvtq_s32_f32(interleaved_pair.val[1]); + interleaved_vec[interleaved_index + 1] = + vqmovn_s32(temporary_wide_vector); + } + + leftover_samples = GetLeftoverSamples(length); + } +#endif // SIMD_NEON + for (size_t i = length - leftover_samples; i < length; ++i) { + const size_t interleaved_index = kNumStereoChannels * i; + interleaved_buffer[interleaved_index] = static_cast<int16_t>(std::max( + kInt16Min, std::min(kInt16Max, kInt16FromFloat * channel_0[i]))); + interleaved_buffer[interleaved_index + 1] = static_cast<int16_t>(std::max( + kInt16Min, std::min(kInt16Max, kInt16FromFloat * channel_1[i]))); + } +} + +void DeinterleaveStereo(size_t length, const int16_t* interleaved_buffer, + int16_t* channel_0, int16_t* channel_1) { + DCHECK(interleaved_buffer); + DCHECK(channel_0); + DCHECK(channel_1); + + size_t leftover_samples = length; +#ifdef SIMD_NEON + if (IsAligned(interleaved_buffer) && IsAligned(channel_0) && + IsAligned(channel_1)) { + const size_t num_chunks = length / kSixteenBitSimdLength; + leftover_samples = length % kSixteenBitSimdLength; + int16x8_t* channel_0_vec = reinterpret_cast<int16x8_t*>(channel_0); 
+ int16x8_t* channel_1_vec = reinterpret_cast<int16x8_t*>(channel_1); + int16x8x2_t deinterleaved_pair; + const int16x8_t* interleaved_vec = + reinterpret_cast<const int16x8_t*>(interleaved_buffer); + for (size_t chunk = 0; chunk < num_chunks; ++chunk) { + const size_t interleaved_index = chunk * kNumStereoChannels; + deinterleaved_pair = vuzpq_s16(interleaved_vec[interleaved_index], + interleaved_vec[interleaved_index + 1]); + channel_0_vec[chunk] = deinterleaved_pair.val[0]; + channel_1_vec[chunk] = deinterleaved_pair.val[1]; + } + } +#endif // SIMD_NEON + for (size_t i = length - leftover_samples; i < length; ++i) { + const size_t interleaved_index = kNumStereoChannels * i; + channel_0[i] = interleaved_buffer[interleaved_index]; + channel_1[i] = interleaved_buffer[interleaved_index + 1]; + } +} + +void DeinterleaveStereo(size_t length, const float* interleaved_buffer, + float* channel_0, float* channel_1) { + DCHECK(interleaved_buffer); + DCHECK(channel_0); + DCHECK(channel_1); + + size_t leftover_samples = length; +#ifdef SIMD_NEON + if (IsAligned(interleaved_buffer) && IsAligned(channel_0) && + IsAligned(channel_1)) { + const size_t num_chunks = GetNumChunks(length); + leftover_samples = GetLeftoverSamples(length); + SimdVector* channel_0_vec = reinterpret_cast<SimdVector*>(channel_0); + SimdVector* channel_1_vec = reinterpret_cast<SimdVector*>(channel_1); + float32x4x2_t deinterleaved_pair; + + const SimdVector* interleaved_vec = + reinterpret_cast<const SimdVector*>(interleaved_buffer); + for (size_t chunk = 0; chunk < num_chunks; ++chunk) { + const size_t interleaved_index = chunk * kNumStereoChannels; + deinterleaved_pair = vuzpq_f32(interleaved_vec[interleaved_index], + interleaved_vec[interleaved_index + 1]); + channel_0_vec[chunk] = deinterleaved_pair.val[0]; + channel_1_vec[chunk] = deinterleaved_pair.val[1]; + } + } +#endif // SIMD_NEON + for (size_t i = length - leftover_samples; i < length; ++i) { + const size_t interleaved_index = 
kNumStereoChannels * i; + channel_0[i] = interleaved_buffer[interleaved_index]; + channel_1[i] = interleaved_buffer[interleaved_index + 1]; + } +} + +void DeinterleaveStereo(size_t length, const int16_t* interleaved_buffer, + float* channel_0, float* channel_1) { + DCHECK(interleaved_buffer); + DCHECK(channel_0); + DCHECK(channel_1); + + size_t leftover_samples = length; +#ifdef SIMD_NEON + if (IsAligned(interleaved_buffer) && IsAligned(channel_0) && + IsAligned(channel_1)) { + const size_t num_chunks = GetNumChunks(length); + leftover_samples = GetLeftoverSamples(length); + SimdVector* channel_0_vec = reinterpret_cast<SimdVector*>(channel_0); + SimdVector* channel_1_vec = reinterpret_cast<SimdVector*>(channel_1); + int16x4x2_t deinterleaved_pair; + int32x4_t temporary_wide; + const SimdVector scaling_vector = SIMD_LOAD_ONE_FLOAT(kFloatFromInt16); + + const int16x4_t* interleaved_vec = + reinterpret_cast<const int16x4_t*>(interleaved_buffer); + for (size_t chunk = 0; chunk < num_chunks; ++chunk) { + const size_t interleaved_index = chunk * kNumStereoChannels; + deinterleaved_pair = vuzp_s16(interleaved_vec[interleaved_index], + interleaved_vec[interleaved_index + 1]); + temporary_wide = vmovl_s16(deinterleaved_pair.val[0]); + channel_0_vec[chunk] = vcvtq_f32_s32(temporary_wide); + channel_0_vec[chunk] = + SIMD_MULTIPLY(scaling_vector, channel_0_vec[chunk]); + temporary_wide = vmovl_s16(deinterleaved_pair.val[1]); + channel_1_vec[chunk] = vcvtq_f32_s32(temporary_wide); + channel_1_vec[chunk] = + SIMD_MULTIPLY(scaling_vector, channel_1_vec[chunk]); + } + } +#endif // SIMD_NEON + for (size_t i = length - leftover_samples; i < length; ++i) { + const size_t interleaved_index = kNumStereoChannels * i; + channel_0[i] = static_cast<float>(interleaved_buffer[interleaved_index]) * + kFloatFromInt16; + channel_1[i] = + static_cast<float>(interleaved_buffer[interleaved_index + 1]) * + kFloatFromInt16; + } +} + +void InterleaveQuad(size_t length, const int16_t* channel_0, + 
const int16_t* channel_1, const int16_t* channel_2, + const int16_t* channel_3, int16_t* workspace, + int16_t* interleaved_buffer) { +#ifdef SIMD_NEON + DCHECK(IsAligned(workspace)); + const size_t double_length = length * 2; + int16_t* workspace_half_point = + workspace + FindNextAlignedArrayIndex(double_length, sizeof(int16_t), + kMemoryAlignmentBytes); + InterleaveStereo(length, channel_0, channel_2, workspace); + InterleaveStereo(length, channel_1, channel_3, workspace_half_point); + InterleaveStereo(double_length, workspace, workspace_half_point, + interleaved_buffer); +#else + for (size_t i = 0; i < length; ++i) { + const size_t interleaved_index = kNumFirstOrderAmbisonicChannels * i; + interleaved_buffer[interleaved_index] = channel_0[i]; + interleaved_buffer[interleaved_index + 1] = channel_1[i]; + interleaved_buffer[interleaved_index + 2] = channel_2[i]; + interleaved_buffer[interleaved_index + 3] = channel_3[i]; + } +#endif // SIMD_NEON +} + +void InterleaveQuad(size_t length, const float* channel_0, + const float* channel_1, const float* channel_2, + const float* channel_3, float* workspace, + float* interleaved_buffer) { +#ifdef SIMD_NEON + DCHECK(IsAligned(workspace)); + const size_t double_length = length * 2; + float* workspace_half_point = + workspace + FindNextAlignedArrayIndex(double_length, sizeof(float), + kMemoryAlignmentBytes); + DCHECK(IsAligned(workspace_half_point)); + InterleaveStereo(length, channel_0, channel_2, workspace); + InterleaveStereo(length, channel_1, channel_3, workspace_half_point); + InterleaveStereo(double_length, workspace, workspace_half_point, + interleaved_buffer); +#else + for (size_t i = 0; i < length; ++i) { + const size_t interleaved_index = kNumFirstOrderAmbisonicChannels * i; + interleaved_buffer[interleaved_index] = channel_0[i]; + interleaved_buffer[interleaved_index + 1] = channel_1[i]; + interleaved_buffer[interleaved_index + 2] = channel_2[i]; + interleaved_buffer[interleaved_index + 3] = channel_3[i]; + } 
+#endif // SIMD_NEON +} + +void DeinterleaveQuad(size_t length, const int16_t* interleaved_buffer, + int16_t* workspace, int16_t* channel_0, + int16_t* channel_1, int16_t* channel_2, + int16_t* channel_3) { +#ifdef SIMD_NEON + DCHECK(IsAligned(workspace)); + const size_t double_length = length * 2; + int16_t* workspace_half_point = + workspace + FindNextAlignedArrayIndex(double_length, sizeof(int16_t), + kMemoryAlignmentBytes); + DCHECK(IsAligned(workspace_half_point)); + DeinterleaveStereo(double_length, interleaved_buffer, workspace, + workspace_half_point); + DeinterleaveStereo(length, workspace, channel_0, channel_2); + DeinterleaveStereo(length, workspace_half_point, channel_1, channel_3); +#else + for (size_t i = 0; i < length; ++i) { + const size_t interleaved_index = kNumFirstOrderAmbisonicChannels * i; + channel_0[i] = interleaved_buffer[interleaved_index]; + channel_1[i] = interleaved_buffer[interleaved_index + 1]; + channel_2[i] = interleaved_buffer[interleaved_index + 2]; + channel_3[i] = interleaved_buffer[interleaved_index + 3]; + } +#endif // SIMD_NEON +} + +void DeinterleaveQuad(size_t length, const float* interleaved_buffer, + float* workspace, float* channel_0, float* channel_1, + float* channel_2, float* channel_3) { +#ifdef SIMD_NEON + DCHECK(IsAligned(workspace)); + const size_t double_length = length * 2; + float* workspace_half_point = + workspace + FindNextAlignedArrayIndex(double_length, sizeof(float), + kMemoryAlignmentBytes); + DCHECK(IsAligned(workspace_half_point)); + DeinterleaveStereo(double_length, interleaved_buffer, workspace, + workspace_half_point); + DeinterleaveStereo(length, workspace, channel_0, channel_2); + DeinterleaveStereo(length, workspace_half_point, channel_1, channel_3); +#else + for (size_t i = 0; i < length; ++i) { + const size_t interleaved_index = kNumFirstOrderAmbisonicChannels * i; + channel_0[i] = interleaved_buffer[interleaved_index]; + channel_1[i] = interleaved_buffer[interleaved_index + 1]; + channel_2[i] 
= interleaved_buffer[interleaved_index + 2]; + channel_3[i] = interleaved_buffer[interleaved_index + 3]; + } +#endif // SIMD_NEON +} + +} // namespace vraudio diff --git a/src/3rdparty/resonance-audio/resonance_audio/base/simd_utils.h b/src/3rdparty/resonance-audio/resonance_audio/base/simd_utils.h new file mode 100644 index 000000000..64fb9c6d1 --- /dev/null +++ b/src/3rdparty/resonance-audio/resonance_audio/base/simd_utils.h @@ -0,0 +1,296 @@ +/* +Copyright 2018 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS-IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +#ifndef RESONANCE_AUDIO_BASE_SIMD_UTILS_H_ +#define RESONANCE_AUDIO_BASE_SIMD_UTILS_H_ + +#include <cstddef> +#include <cstdint> + +namespace vraudio { + +// Checks if the pointer provided is correctly aligned for SIMD. +// +// @param pointer Pointer to check. +// @return True if the pointer is correctly aligned. +bool IsAligned(const float* pointer); +bool IsAligned(const int16_t* pointer); + +// Rounds a number of frames up to the next aligned memory address +// based on |memory_alignment_bytes|. This allows for aligning offset pointers +// into a single chunk of allocated memory. +// +// @param length Number of samples before the desired offset pointer. +// @param type_size_bytes Size of the type of each entry in the array. +// @param memory_alignment_bytes Number of bytes to which an address is aligned. +// @return Number of samples into the memory chunk to ensure aligned memory. 
+size_t FindNextAlignedArrayIndex(size_t length, size_t type_size_bytes, + size_t memory_alignment_bytes); + +// Adds a float array |input_a| to another float array |input_b| and stores the +// result in |output|. +// +// @param length Number of floats. +// @param input_a Pointer to the first float in input_a array. +// @param input_b Pointer to the first float in input_b array. +// @param output Pointer to the first float in output array. +void AddPointwise(size_t length, const float* input_a, const float* input_b, + float* output); + +// Subtracts a float array |input_a|, pointwise from another float array +// |input_b| and stores the result in |output|. +// +// @param length Number of floats. +// @param input_a Pointer to the first float in input_a array. +// @param input_b Pointer to the first float in input_b array. +// @param output Pointer to the first float in output array. +void SubtractPointwise(size_t length, const float* input_a, + const float* input_b, float* output); + +// Pointwise multiplies a float array |input_a| with another float array +// |input_b| and stores the result in |output|. +// +// @param length Number of floats. +// @param input_a Pointer to the first float in input_a array. +// @param input_b Pointer to the first float in input_b array. +// @param output Pointer to the first float in output array. +void MultiplyPointwise(size_t length, const float* input_a, + const float* input_b, float* output); + +// Pointwise multiplies a float array |input_a| with another float array +// |input_b| and adds the result onto |accumulator|. +// +// @param length Number of floats. +// @param input_a Pointer to the first float in input_a array. +// @param input_b Pointer to the first float in input_b array. +// @param accumulator Pointer to the first float in accumulator array. +void MultiplyAndAccumulatePointwise(size_t length, const float* input_a, + const float* input_b, float* accumulator); + +// Multiplies a float array |input| by a scalar |gain| over |length| samples.
+// +// @param length Number of floats. +// @param gain Scalar value with which to multiply the input. +// @param input Pointer to the first float in input array. +// @param output Pointer to the first float in output array. +void ScalarMultiply(size_t length, float gain, const float* input, + float* output); + +// Multiplies a float array |input| by a scalar |gain| over |length| samples and +// adds the result onto |accumulator|. +// +// @param length Number of floats. +// @param gain Scalar value with which to multiply the input. +// @param input Pointer to the first float in input array. +// @param accumulator Pointer to the first float in accumulator array. +void ScalarMultiplyAndAccumulate(size_t length, float gain, const float* input, + float* accumulator); + +// Calculates an approximate reciprocal square root. +// +// @param length Number of floats. +// @param input Pointer to the first float in input array. +// @param output Pointer to the first float in output array. +void ReciprocalSqrt(size_t length, const float* input, float* output); + +// Calculates an approximate square root. +// +// @param length Number of floats. +// @param input Pointer to the first float in input array. +// @param output Pointer to the first float in output array. +void Sqrt(size_t length, const float* input, float* output); + +// Calculates the approximate magnitudes of interleaved complex numbers. +// +// @param length Number of complex numbers in the input array, +// (i.e. half its length). +// @param input Pointer to the first float in input array. Length: 2 * |length|. +// @param output Pointer to the first float in output array, Length: |length|. +void ApproxComplexMagnitude(size_t length, const float* input, float* output); + +// Calculates the complex values in interleaved format (real, imaginary), from a +// vector of magnitudes and of sines and cosines of phase. +// +// @param length Number of total entries (real & imaginary) in the input array.
+// @param magnitude Pointer to the first float in the magnitude array, Length: +// |length| / 2 +// @param cos_phase Pointer to the first float in the cosine phase array, +// Length: |length| / 2 +// @param sin_phase Pointer to the first float in the sine phase array, Length: +// |length| / 2 +// @param complex_interleaved_format_output Pointer to the first float in the +// output array. Length: |length|. +void ComplexInterleavedFormatFromMagnitudeAndSinCosPhase( + size_t length, const float* magnitude, const float* cos_phase, + const float* sin_phase, float* complex_interleaved_format_output); + +// Generates an identical left and right pair of stereo channels from a mono +// input channel, where each channel is the mono channel times 1/sqrt(2). +// +// @param length Number of floats. +// @param mono Pointer to the first float in an input mono array. +// @param left Pointer to the first float in the left output array. +// @param right Pointer to the first float in the right output array. +void StereoFromMonoSimd(size_t length, const float* mono, float* left, + float* right); + +// Generates a mono downmix from a pair of stereo channels, where the output is +// equal to the sum of the two inputs times 1/sqrt(2). +// +// @param length Number of floats. +// @param left Pointer to the first float in the left input array. +// @param right Pointer to the first float in the right input array. +// @param mono Pointer to the first float in an output mono array. +void MonoFromStereoSimd(size_t length, const float* left, const float* right, + float* mono); + +// Converts an array of 32 bit float input to clamped 16 bit int output. +// +// @param length Number of floats in the input array and int16_ts in the output. +// @param input Float array. +// @param output Int array. +void Int16FromFloat(size_t length, const float* input, int16_t* output); + +// Converts an array of 16 bit int input to 32 bit float output. 
+// +// @param length Number of int16_ts in the input array and floats in the output. +// @param input Int array. +// @param output Float array. +void FloatFromInt16(size_t length, const int16_t* input, float* output); + +// Interleaves a pair of mono buffers of int_16 data into a stereo buffer. +// +// @param length Number of frames per mono channel. The interleaved buffer must +// be twice this size. +// @param channel_0 Input buffer of mono data for the first channel. +// @param channel_1 Input buffer of mono data for the second channel. +// @param interleaved_buffer Output buffer of stereo interleaved data. +void InterleaveStereo(size_t length, const int16_t* channel_0, + const int16_t* channel_1, int16_t* interleaved_buffer); + +// Interleaves a pair of mono buffers of float data into a stereo buffer. +// +// @param length Number of frames per mono channel. The interleaved buffer must +// be twice this size. +// @param channel_0 Input buffer of mono data for the first channel. +// @param channel_1 Input buffer of mono data for the second channel. +// @param interleaved_buffer Output buffer of stereo interleaved data. +void InterleaveStereo(size_t length, const float* channel_0, + const float* channel_1, float* interleaved_buffer); + +// Interleaves a pair of mono buffers of float data into a stereo buffer of +// int16_t data. +// +// @param length Number of frames per mono channel. The interleaved buffer must +// be twice this size. +// @param channel_0 Input buffer of mono data for the first channel (float). +// @param channel_1 Input buffer of mono data for the second channel (float). +// @param interleaved_buffer Output buffer of stereo interleaved data (int16_t). +void InterleaveStereo(size_t length, const float* channel_0, + const float* channel_1, int16_t* interleaved_buffer); + +// Deinterleaves a stereo buffer of int16_t data into a pair of mono buffers. +// +// @param length Number of frames per mono channel. 
The interleaved buffer must +// be twice this size. +// @param interleaved_buffer Input buffer of stereo interleaved data. +// @param channel_0 Output buffer of mono data for the first channel. +// @param channel_1 Output buffer of mono data for the second channel. +void DeinterleaveStereo(size_t length, const int16_t* interleaved_buffer, + int16_t* channel_0, int16_t* channel_1); + +// Deinterleaves a stereo buffer of float data into a pair of mono buffers. +// +// @param length Number of frames per mono channel. The interleaved buffer must +// be twice this size. +// @param interleaved_buffer Input buffer of stereo interleaved data. +// @param channel_0 Output buffer of mono data for the first channel. +// @param channel_1 Output buffer of mono data for the second channel. +void DeinterleaveStereo(size_t length, const float* interleaved_buffer, + float* channel_0, float* channel_1); + +// Deinterleaves a stereo buffer of int16_t data into a pair of mono float +// buffers, performing the int16 to floating point conversion. +// +// @param length Number of frames per mono channel. The interleaved buffer must +// be twice this size. +// @param interleaved_buffer Input buffer of stereo interleaved data (int16_t). +// @param channel_0 Output buffer of mono data for the first channel (float). +// @param channel_1 Output buffer of mono data for the second channel (float). +void DeinterleaveStereo(size_t length, const int16_t* interleaved_buffer, + float* channel_0, float* channel_1); + +// Interleaves four mono buffers of int16_t data into a quad buffer. +// +// @param length Number of frames per mono channel. The interleaved buffer must +// be four times this size and the workspace must be five times this size. +// @param channel_0 Input buffer of mono data for the first channel. +// @param channel_1 Input buffer of mono data for the second channel. +// @param channel_2 Input buffer of mono data for the third channel. 
+// @param channel_3 Input buffer of mono data for the fourth channel. +// @param workspace Aligned buffer of 5 * |length| samples in length. +// @param interleaved_buffer Output buffer of quad interleaved data. +void InterleaveQuad(size_t length, const int16_t* channel_0, + const int16_t* channel_1, const int16_t* channel_2, + const int16_t* channel_3, int16_t* workspace, + int16_t* interleaved_buffer); + +// Interleaves four mono buffers of float data into a quad buffer. +// +// @param length Number of frames per mono channel. The interleaved buffer must +// be four times this size and the workspace must be five times this size. +// @param channel_0 Input buffer of mono data for the first channel. +// @param channel_1 Input buffer of mono data for the second channel. +// @param channel_2 Input buffer of mono data for the third channel. +// @param channel_3 Input buffer of mono data for the fourth channel. +// @param workspace Aligned buffer of 5 * |length| samples in length. +// @param interleaved_buffer Output buffer of quad interleaved data. +void InterleaveQuad(size_t length, const float* channel_0, + const float* channel_1, const float* channel_2, + const float* channel_3, float* workspace, + float* interleaved_buffer); + +// Deinterleaves a quad buffer of int16_t data into four mono buffers. +// +// @param length Number of frames per mono channel. The interleaved buffer must +// be four times this size and the workspace must be five times this size. +// @param interleaved_buffer Input buffer of quad interleaved data. +// @param workspace Aligned buffer of 5 * |length| samples in length. +// @param channel_0 Output buffer of mono data for the first channel. +// @param channel_1 Output buffer of mono data for the second channel. +// @param channel_2 Output buffer of mono data for the third channel. +// @param channel_3 Output buffer of mono data for the fourth channel. 
+void DeinterleaveQuad(size_t length, const int16_t* interleaved_buffer, + int16_t* workspace, int16_t* channel_0, + int16_t* channel_1, int16_t* channel_2, + int16_t* channel_3); + +// Deinterleaves a quad buffer of float data into four mono buffers. +// +// @param length Number of frames per mono channel. The interleaved buffer must +// be four times this size and the workspace must be five times this size. +// @param interleaved_buffer Input buffer of quad interleaved data. +// @param workspace Aligned buffer of 5 * |length| samples in length. +// @param channel_0 Output buffer of mono data for the first channel. +// @param channel_1 Output buffer of mono data for the second channel. +// @param channel_2 Output buffer of mono data for the third channel. +// @param channel_3 Output buffer of mono data for the fourth channel. +void DeinterleaveQuad(size_t length, const float* interleaved_buffer, + float* workspace, float* channel_0, float* channel_1, + float* channel_2, float* channel_3); + +} // namespace vraudio + +#endif // RESONANCE_AUDIO_BASE_SIMD_UTILS_H_ diff --git a/src/3rdparty/resonance-audio/resonance_audio/base/simd_utils_test.cc b/src/3rdparty/resonance-audio/resonance_audio/base/simd_utils_test.cc new file mode 100644 index 000000000..7fb8e83b5 --- /dev/null +++ b/src/3rdparty/resonance-audio/resonance_audio/base/simd_utils_test.cc @@ -0,0 +1,711 @@ +/* +Copyright 2018 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS-IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +#include "base/simd_utils.h" + +#include <algorithm> +#include <cmath> +#include <cstdint> +#include <vector> + +#include "third_party/googletest/googletest/include/gtest/gtest.h" +#include "base/audio_buffer.h" +#include "base/constants_and_types.h" + +namespace vraudio { + +namespace { + +// Input lengths (purposefully chosen not to be a multiple of SIMD_LENGTH). +const size_t kInputSize = 7; +const size_t kNumTestChannels = 3; +const size_t kNumQuadChannels = 4; + +// Length of deinterleaved and interleaved buffers. +const size_t kHalfSize = 21; +const size_t kFullSize = kHalfSize * 2; +const size_t kQuadSize = kHalfSize * 4; +const size_t kPentSize = kHalfSize * 5; + +// The int16 values for the deinterleaving test. +const int16_t kOne = 0x0001; +const int16_t kTwo = 0x0002; +const int16_t kThree = 0x0003; +const int16_t kFour = 0x0004; +const int16_t kMax = 0x7FFF; +const int16_t kMin = -0x7FFF; + +// Epsilon for conversion from int16_t back to float. +const float kFloatEpsilon = 1e-4f; +const int16_t kIntEpsilon = 1; + +// Intereleaved data. +const int16_t kInterleavedInput[kFullSize] = { + kOne, kTwo, kOne, kTwo, kOne, kTwo, kOne, kTwo, kOne, kTwo, kOne, + kTwo, kOne, kTwo, kOne, kTwo, kOne, kTwo, kOne, kTwo, kOne, kTwo, + kOne, kTwo, kOne, kTwo, kOne, kTwo, kOne, kTwo, kOne, kTwo, kOne, + kTwo, kOne, kTwo, kOne, kTwo, kOne, kTwo, kOne, kTwo}; + +// Corresponding values for float and 16 bit int. 
+const float kFloatInput[kInputSize] = {0.5f, -0.5f, 1.0f, -1.0f, + 1.0f, -1.0f, 0.0f}; +const int16_t kIntInput[kInputSize] = {0x4000, -0x4000, 0x7FFF, -0x7FFF, + 0x7FFF, -0x7FFF, 0}; + +TEST(SimdUtilsTest, IsAlignedTest) { + AudioBuffer aligned_audio_buffer(kNumMonoChannels, kInputSize); + const float* aligned_ptr = aligned_audio_buffer[0].begin(); + const float* unaligned_ptr = aligned_ptr + 1; + EXPECT_TRUE(IsAligned(aligned_ptr)); + EXPECT_FALSE(IsAligned(unaligned_ptr)); +} + +TEST(SimdUtilsTest, AddPointwiseTest) { + const float kResult = 3.0f; + AudioBuffer aligned_audio_buffer(kNumTestChannels, kInputSize); + aligned_audio_buffer.Clear(); + for (size_t i = 0; i < kInputSize; ++i) { + aligned_audio_buffer[0][i] = 1.0f; + aligned_audio_buffer[1][i] = 2.0f; + } + AddPointwise(kInputSize, &aligned_audio_buffer[0][0], + &aligned_audio_buffer[1][0], &aligned_audio_buffer[2][0]); + for (size_t i = 0; i < kInputSize; ++i) { + EXPECT_FLOAT_EQ(aligned_audio_buffer[2][i], kResult); + } +} + +TEST(SimdUtilsTest, AddPointwiseInPlaceTest) { + AudioBuffer aligned_audio_buffer(kNumStereoChannels, kInputSize); + aligned_audio_buffer.Clear(); + for (size_t i = 0; i < kInputSize; ++i) { + aligned_audio_buffer[0][i] = static_cast<float>(i); + } + const size_t kRuns = 2; + for (size_t i = 0; i < kRuns; ++i) { + AddPointwise(kInputSize, &aligned_audio_buffer[0][0], + &aligned_audio_buffer[1][0], &aligned_audio_buffer[1][0]); + } + for (size_t i = 0; i < kInputSize; ++i) { + EXPECT_FLOAT_EQ(aligned_audio_buffer[1][i], static_cast<float>(i * kRuns)); + } +} + +TEST(SimdUtilsTest, SubtractPointwiseTest) { + AudioBuffer aligned_audio_buffer(kNumStereoChannels, kInputSize); + aligned_audio_buffer.Clear(); + for (size_t i = 0; i < kInputSize; ++i) { + aligned_audio_buffer[0][i] = static_cast<float>(i); + aligned_audio_buffer[1][i] = static_cast<float>(2 * i); + } + SubtractPointwise(kInputSize, &aligned_audio_buffer[0][0], + &aligned_audio_buffer[1][0], &aligned_audio_buffer[1][0]); 
+ for (size_t i = 0; i < kInputSize; ++i) { + EXPECT_FLOAT_EQ(aligned_audio_buffer[1][i], aligned_audio_buffer[0][i]); + } +} + +TEST(SimdUtilsTest, MultiplyPointwiseTest) { + AudioBuffer aligned_audio_buffer(kNumStereoChannels, kInputSize); + aligned_audio_buffer.Clear(); + for (size_t i = 0; i < kInputSize; ++i) { + aligned_audio_buffer[0][i] = static_cast<float>(i); + aligned_audio_buffer[1][i] = static_cast<float>(i); + } + MultiplyPointwise(kInputSize, &aligned_audio_buffer[0][0], + &aligned_audio_buffer[1][0], &aligned_audio_buffer[1][0]); + for (size_t i = 0; i < kInputSize; ++i) { + EXPECT_FLOAT_EQ(aligned_audio_buffer[1][i], + aligned_audio_buffer[0][i] * aligned_audio_buffer[0][i]); + } +} + +TEST(SimdUtilsTest, MultiplyAndAccumulatePointwiseTest) { + const float kInitialOutput = 1.0f; + AudioBuffer aligned_input_buffer(kNumStereoChannels, kInputSize); + aligned_input_buffer.Clear(); + AudioBuffer aligned_output_buffer(kNumMonoChannels, kInputSize); + aligned_output_buffer.Clear(); + for (size_t i = 0; i < kInputSize; ++i) { + aligned_input_buffer[0][i] = static_cast<float>(i); + aligned_input_buffer[1][i] = static_cast<float>(i); + aligned_output_buffer[0][i] = kInitialOutput; + } + MultiplyAndAccumulatePointwise(kInputSize, &aligned_input_buffer[0][0], + &aligned_input_buffer[1][0], + &aligned_output_buffer[0][0]); + for (size_t i = 0; i < kInputSize; ++i) { + EXPECT_FLOAT_EQ(aligned_output_buffer[0][i], + kInitialOutput + aligned_input_buffer[0][i] * + aligned_input_buffer[1][i]); + } +} + +TEST(SimdUtilsTest, ScalarMultiplyTest) { + AudioBuffer aligned_audio_buffer(kNumStereoChannels, kInputSize); + aligned_audio_buffer.Clear(); + for (size_t i = 0; i < kInputSize; ++i) { + aligned_audio_buffer[0][i] = 1.0f; + aligned_audio_buffer[1][i] = 0.0f; + } + const float gain = 0.5f; + ScalarMultiply(kInputSize, gain, &aligned_audio_buffer[0][0], + &aligned_audio_buffer[1][0]); + for (size_t i = 0; i < kInputSize; ++i) { + 
EXPECT_FLOAT_EQ(aligned_audio_buffer[1][i], + aligned_audio_buffer[0][i] * gain); + } +} + +TEST(SimdUtilsTest, ScalarMultiplyAndAccumuateTest) { + const float kResult = 2.0f; + AudioBuffer aligned_audio_buffer(kNumStereoChannels, kInputSize); + aligned_audio_buffer.Clear(); + for (size_t i = 0; i < kInputSize; ++i) { + aligned_audio_buffer[0][i] = 0.5f; + aligned_audio_buffer[1][i] = 1.0f; + } + const float gain = 2.0f; + ScalarMultiplyAndAccumulate(kInputSize, gain, &aligned_audio_buffer[0][0], + &aligned_audio_buffer[1][0]); + for (size_t i = 0; i < kInputSize; ++i) { + EXPECT_FLOAT_EQ(aligned_audio_buffer[1][i], kResult); + } +} + +TEST(SimdUtilsTest, SqrtTest) { + const std::vector<float> kNumbers{130.0f, 13.0f, 1.3f, + 0.13f, 0.013f, 0.0013f}; + AudioBuffer numbers(kNumMonoChannels, kNumbers.size()); + AudioBuffer approximate(kNumMonoChannels, kNumbers.size()); + numbers[0] = kNumbers; + const float kSqrtEpsilon = 2e-3f; + + Sqrt(kNumbers.size(), numbers[0].begin(), approximate[0].begin()); + + for (size_t i = 0; i < kNumbers.size(); ++i) { + const float actual = std::sqrt(kNumbers[i]); + EXPECT_LT(std::abs(actual - approximate[0][i]) / actual, kSqrtEpsilon); + } +} + +TEST(SimdUtilsTest, ReciprocalSqrtTest) { + const std::vector<float> kNumbers{130.0f, 13.0f, 1.3f, + 0.13f, 0.013f, 0.0013f}; + AudioBuffer numbers(kNumMonoChannels, kNumbers.size()); + AudioBuffer sqrt(kNumMonoChannels, kNumbers.size()); + AudioBuffer recip_sqrt(kNumMonoChannels, kNumbers.size()); + + Sqrt(kNumbers.size(), numbers[0].begin(), sqrt[0].begin()); + ReciprocalSqrt(kNumbers.size(), numbers[0].begin(), recip_sqrt[0].begin()); + + for (size_t i = 0; i < kNumbers.size(); ++i) { + EXPECT_FLOAT_EQ(1.0f / recip_sqrt[0][i], sqrt[0][i]); + } +} + +// Tests that the correct complex magnitudes are calculated for a range of +// complex numbers with both positive and negative imaginary part. 
+TEST(SimdUtilsTest, ApproxComplexMagnitudeTest) { + const size_t kFramesPerBuffer = 17; + // Check that we are correct to within 0.5% of each value. + const float kErrEpsilon = 5e-3f; + AudioBuffer complex_buffer(kNumMonoChannels, 2 * kFramesPerBuffer); + for (size_t i = 0; i < kFramesPerBuffer; ++i) { + const size_t j = 2 * i; + complex_buffer[0][j] = static_cast<float>(i); + complex_buffer[0][j + 1] = ((i % 2) ? -1.0f : 1.0f) * static_cast<float>(i); + } + AudioBuffer magnitude_buffer(kNumMonoChannels, kFramesPerBuffer); + + ApproxComplexMagnitude(kFramesPerBuffer, complex_buffer[0].begin(), + magnitude_buffer[0].begin()); + + for (size_t sample = 0; sample < kFramesPerBuffer; ++sample) { + const float expected = static_cast<float>(sample) * kSqrtTwo; + // Check it's correct to within 0.5%. + EXPECT_NEAR(magnitude_buffer[0][sample], expected, kErrEpsilon * expected); + } +} + +// Tests that the ComplexInterleavedFormatFromMagnitudeAndSinCosPhase() method +// correctly recovers the frequency response from magnitude and phase. +TEST(SimdUtilsTest, ComplexInterleavedFormatFromMagnitudeAndSinCosPhaseTest) { + // The following vectors contain the inverse sines and cosines of the numbers + // 0 to 0.75 in steps of 0.05 (calculated in MATLAB).
+ const size_t kLength = 16; + AudioBuffer cos_vec(kNumMonoChannels, kLength); + cos_vec[0] = {1.5708f, 1.5208f, 1.4706f, 1.4202f, 1.3694f, 1.3181f, + 1.2661f, 1.2132f, 1.1593f, 1.1040f, 1.0472f, 0.9884f, + 0.9273f, 0.8632f, 0.7954f, 0.7227f}; + AudioBuffer sin_vec(kNumMonoChannels, kLength); + sin_vec[0] = {0.0000f, 0.0500f, 0.1002f, 0.1506f, 0.2014f, 0.2527f, + 0.3047f, 0.3576f, 0.4115f, 0.4668f, 0.5236f, 0.5824f, + 0.6435f, 0.7076f, 0.7754f, 0.8481f}; + const float kMagnitude = 10.0f; + AudioBuffer magnitude(kNumMonoChannels, kLength); + std::fill(magnitude[0].begin(), magnitude[0].end(), kMagnitude); + const size_t output_size = 2 * sin_vec.num_frames(); + AudioBuffer output(kNumMonoChannels, output_size); + output.Clear(); + + ComplexInterleavedFormatFromMagnitudeAndSinCosPhase( + output_size, &magnitude[0][0], &cos_vec[0][0], &sin_vec[0][0], + &output[0][0]); + + for (size_t i = 0, j = 0; i < output_size; i += 2, ++j) { + EXPECT_FLOAT_EQ(output[0][i], kMagnitude * cos_vec[0][j]); + EXPECT_FLOAT_EQ(output[0][i + 1], kMagnitude * sin_vec[0][j]); + } +} + +TEST(SimdUtilsTest, StereoMonoTest) { + const float kResult = 2.0f / std::sqrt(2.0f); + AudioBuffer aligned_audio_buffer(kNumTestChannels, kInputSize); + aligned_audio_buffer.Clear(); + for (size_t i = 0; i < kInputSize; ++i) { + aligned_audio_buffer[0][i] = 1.0f; + aligned_audio_buffer[1][i] = 1.0f; + } + MonoFromStereoSimd(kInputSize, &aligned_audio_buffer[0][0], + &aligned_audio_buffer[1][0], &aligned_audio_buffer[2][0]); + for (size_t i = 0; i < kInputSize; ++i) { + EXPECT_FLOAT_EQ(aligned_audio_buffer[2][i], kResult); + } + // Perform inverse operation. 
+ StereoFromMonoSimd(kInputSize, &aligned_audio_buffer[2][0], + &aligned_audio_buffer[0][0], &aligned_audio_buffer[1][0]); + for (size_t i = 0; i < kInputSize; ++i) { + EXPECT_FLOAT_EQ(aligned_audio_buffer[0][i], 1.0f); + EXPECT_FLOAT_EQ(aligned_audio_buffer[1][i], 1.0f); + } +} + +TEST(SimdUtilsTest, InterleaveAlignedInt16Test) { + AudioBuffer::AlignedInt16Vector interleaved(kFullSize); + AudioBuffer::AlignedInt16Vector channel_0(kHalfSize); + AudioBuffer::AlignedInt16Vector channel_1(kHalfSize); + + // Fill the aligned input buffer. + for (size_t i = 0; i < kHalfSize; ++i) { + channel_0[i] = kOne; + channel_1[i] = kTwo; + } + + InterleaveStereo(kHalfSize, channel_0.data(), channel_1.data(), + interleaved.data()); + + for (size_t i = 0; i < kFullSize; ++i) { + const int16_t value = (i % 2 == 0) ? kOne : kTwo; + EXPECT_EQ(interleaved[i], value); + } +} + +TEST(SimdUtilsTest, InterleaveUnalignedInt16Test) { + AudioBuffer::AlignedInt16Vector interleaved(kFullSize); + AudioBuffer::AlignedInt16Vector channel_0(kHalfSize); + AudioBuffer::AlignedInt16Vector channel_1(kHalfSize); + + // Fill the aligned input buffer. + for (size_t i = 0; i < kHalfSize; ++i) { + channel_0[i] = kOne; + channel_1[i] = kTwo; + } + + InterleaveStereo(kHalfSize, channel_0.data(), channel_1.data(), + interleaved.data()); + + for (size_t i = 0; i < kFullSize; ++i) { + const int16_t value = (i % 2 == 0) ? kOne : kTwo; + EXPECT_EQ(interleaved[i], value); + } +} + +TEST(SimdUtilsTest, DeinterleaveAlignedInt16Test) { + AudioBuffer::AlignedInt16Vector interleaved(kFullSize); + AudioBuffer::AlignedInt16Vector channel_0(kHalfSize); + AudioBuffer::AlignedInt16Vector channel_1(kHalfSize); + + // Fill the aligned input buffer. + for (size_t i = 0; i < kFullSize; ++i) { + interleaved[i] = kInterleavedInput[i]; + } + + // Clear the output buffers. 
+ for (size_t i = 0; i < kHalfSize; ++i) { + channel_0[i] = static_cast<int16_t>(0); + channel_1[i] = static_cast<int16_t>(0); + } + + // Test the case where input is aligned. + DeinterleaveStereo(kHalfSize, interleaved.data(), channel_0.data(), + channel_1.data()); + for (size_t i = 0; i < kHalfSize; ++i) { + EXPECT_EQ(channel_0[i], kOne); + EXPECT_EQ(channel_1[i], kTwo); + } +} + +TEST(SimdUtilsTest, DeinterleaveUnalignedInt16Test) { + AudioBuffer::AlignedInt16Vector channel_0(kHalfSize); + AudioBuffer::AlignedInt16Vector channel_1(kHalfSize); + + // Clear the output buffers. + for (size_t i = 0; i < kHalfSize; ++i) { + channel_0[i] = static_cast<int16_t>(0); + channel_1[i] = static_cast<int16_t>(0); + } + + // Test the case where input is unaligned. + DeinterleaveStereo(kHalfSize, kInterleavedInput, channel_0.data(), + channel_1.data()); + for (size_t i = 0; i < kHalfSize; ++i) { + EXPECT_EQ(channel_0[i], kOne); + EXPECT_EQ(channel_1[i], kTwo); + } +} + +TEST(SimdUtilsTest, DeinterleaveAlignedInt16ConvertToFloatTest) { + AudioBuffer::AlignedInt16Vector interleaved(kFullSize); + AudioBuffer planar(kNumStereoChannels, kHalfSize); + AudioBuffer::Channel& channel_0 = planar[0]; + AudioBuffer::Channel& channel_1 = planar[1]; + + // Fill the aligned input buffer. + for (size_t i = 0; i < kFullSize; ++i) { + interleaved[i] = i % 2 ? kMin : kMax; + } + + // Clear the output buffers. + planar.Clear(); + + // Test the case where input is aligned. + DeinterleaveStereo(kHalfSize, interleaved.data(), channel_0.begin(), + channel_1.begin()); + for (size_t i = 0; i < kHalfSize; ++i) { + EXPECT_NEAR(channel_0[i], 1.0f, kEpsilonFloat); + EXPECT_NEAR(channel_1[i], -1.0f, kEpsilonFloat); + } +} + +TEST(SimdUtilsTest, DeinterleaveUnalignedInt16ConvertToFloatTest) { + int16_t interleaved[kFullSize]; + AudioBuffer planar(kNumStereoChannels, kHalfSize); + AudioBuffer::Channel& channel_0 = planar[0]; + AudioBuffer::Channel& channel_1 = planar[1]; + + // Fill the unaligned input buffer. 
+ for (size_t i = 0; i < kFullSize; ++i) { + interleaved[i] = i % 2 ? kMin : kMax; + } + + // Clear the output buffers. + planar.Clear(); + + // Test the case where input is unaligned. + DeinterleaveStereo(kHalfSize, interleaved, channel_0.begin(), + channel_1.begin()); + + for (size_t i = 0; i < kHalfSize; ++i) { + EXPECT_NEAR(channel_0[i], 1.0f, kEpsilonFloat); + EXPECT_NEAR(channel_1[i], -1.0f, kEpsilonFloat); + } +} + +TEST(SimdUtilsTest, InterleaveAlignedFloatTest) { + AudioBuffer interleaved(kNumMonoChannels, kFullSize); + AudioBuffer planar(kNumStereoChannels, kHalfSize); + AudioBuffer::Channel& channel_0 = planar[0]; + AudioBuffer::Channel& channel_1 = planar[1]; + + // Fill the aligned input buffer. + for (size_t i = 0; i < kHalfSize; ++i) { + channel_0[i] = 1.0f; + channel_1[i] = 2.0f; + } + + InterleaveStereo(kHalfSize, channel_0.begin(), channel_1.begin(), + interleaved[0].begin()); + + for (size_t i = 0; i < kFullSize; ++i) { + const float value = (i % 2 == 0) ? 1.0f : 2.0f; + EXPECT_FLOAT_EQ(interleaved[0][i], value); + } +} + +TEST(SimdUtilsTest, InterleaveUnalignedFloatTest) { + float interleaved[kFullSize]; + AudioBuffer planar(kNumStereoChannels, kHalfSize); + AudioBuffer::Channel& channel_0 = planar[0]; + AudioBuffer::Channel& channel_1 = planar[1]; + + // Fill the aligned input buffer. + for (size_t i = 0; i < kHalfSize; ++i) { + channel_0[i] = 1.0f; + channel_1[i] = 2.0f; + } + + InterleaveStereo(kHalfSize, channel_0.begin(), channel_1.begin(), + interleaved); + + for (size_t i = 0; i < kFullSize; ++i) { + const float value = (i % 2 == 0) ? 1.0f : 2.0f; + EXPECT_FLOAT_EQ(interleaved[i], value); + } +} + +TEST(SimdUtilsTest, InterleaveAlignedFloatConvertToInt16Test) { + AudioBuffer::AlignedInt16Vector interleaved(kFullSize); + AudioBuffer planar(kNumStereoChannels, kHalfSize); + AudioBuffer::Channel& channel_0 = planar[0]; + AudioBuffer::Channel& channel_1 = planar[1]; + + // Fill the aligned input buffer. 
+ for (size_t i = 0; i < kHalfSize; ++i) { + channel_0[i] = 1.0f; + channel_1[i] = -1.0f; + } + + InterleaveStereo(kHalfSize, channel_0.begin(), channel_1.begin(), + interleaved.data()); + + for (size_t i = 0; i < kFullSize; ++i) { + const int16_t value = i % 2 ? kMin : kMax; + EXPECT_NEAR(interleaved[i], value, kIntEpsilon); + } +} + +TEST(SimdUtilsTest, InterleaveUnalignedFloatConvertToInt16Test) { + int16_t interleaved[kFullSize]; + AudioBuffer planar(kNumStereoChannels, kHalfSize); + AudioBuffer::Channel& channel_0 = planar[0]; + AudioBuffer::Channel& channel_1 = planar[1]; + + // Fill the aligned input buffer. + for (size_t i = 0; i < kHalfSize; ++i) { + channel_0[i] = 1.0f; + channel_1[i] = -1.0f; + } + + InterleaveStereo(kHalfSize, channel_0.begin(), channel_1.begin(), + interleaved); + + for (size_t i = 0; i < kFullSize; ++i) { + const int16_t value = i % 2 ? kMin : kMax; + EXPECT_NEAR(interleaved[i], value, kIntEpsilon); + } +} + +TEST(SimdUtilsTest, DeinterleaveAlignedFloatTest) { + AudioBuffer interleaved(kNumMonoChannels, kFullSize); + AudioBuffer planar(kNumStereoChannels, kHalfSize); + AudioBuffer::Channel& channel_0 = planar[0]; + AudioBuffer::Channel& channel_1 = planar[1]; + + // Fill the aligned input buffer. + for (size_t i = 0; i < kFullSize; ++i) { + interleaved[0][i] = (i % 2 == 0) ? 1.0f : 2.0f; + } + + // Clear the output buffers. + planar.Clear(); + + // Test the case where input is aligned. + DeinterleaveStereo(kHalfSize, interleaved[0].begin(), channel_0.begin(), + channel_1.begin()); + for (size_t i = 0; i < kHalfSize; ++i) { + EXPECT_FLOAT_EQ(channel_0[i], 1.0f); + EXPECT_FLOAT_EQ(channel_1[i], 2.0f); + } +} + +TEST(SimdUtilsTest, DeinterleaveUnalignedFloatTest) { + float interleaved[kFullSize]; + AudioBuffer planar(kNumStereoChannels, kHalfSize); + AudioBuffer::Channel& channel_0 = planar[0]; + AudioBuffer::Channel& channel_1 = planar[1]; + + // Fill the unaligned input buffer. 
+ for (size_t i = 0; i < kFullSize; ++i) { + interleaved[i] = (i % 2 == 0) ? 1.0f : 2.0f; + } + + // Clear the output buffers. + planar.Clear(); + + // Test the case where input is unaligned. + DeinterleaveStereo(kHalfSize, interleaved, channel_0.begin(), + channel_1.begin()); + for (size_t i = 0; i < kHalfSize; ++i) { + EXPECT_FLOAT_EQ(channel_0[i], 1.0f); + EXPECT_FLOAT_EQ(channel_1[i], 2.0f); + } +} + +TEST(SimdUtilsTest, InterleaveQuadInt16Test) { + AudioBuffer::AlignedInt16Vector interleaved(kQuadSize); + AudioBuffer::AlignedInt16Vector workspace(kPentSize); + AudioBuffer::AlignedInt16Vector channel_0(kHalfSize); + AudioBuffer::AlignedInt16Vector channel_1(kHalfSize); + AudioBuffer::AlignedInt16Vector channel_2(kHalfSize); + AudioBuffer::AlignedInt16Vector channel_3(kHalfSize); + + // Fill the aligned input buffer. + for (size_t i = 0; i < kHalfSize; ++i) { + channel_0[i] = kOne; + channel_1[i] = kTwo; + channel_2[i] = kThree; + channel_3[i] = kFour; + } + + InterleaveQuad(kHalfSize, channel_0.data(), channel_1.data(), + channel_2.data(), channel_3.data(), workspace.data(), + interleaved.data()); + + for (size_t i = 0; i < kQuadSize; ++i) { + const int16_t value = static_cast<int16_t>(1 + (i % kNumQuadChannels)); + EXPECT_FLOAT_EQ(interleaved[i], value); + } +} + +TEST(SimdUtilsTest, DeinterleaveQuadInt16Test) { + AudioBuffer::AlignedInt16Vector interleaved(kQuadSize); + AudioBuffer::AlignedInt16Vector workspace(kPentSize); + AudioBuffer::AlignedInt16Vector channel_0(kHalfSize); + AudioBuffer::AlignedInt16Vector channel_1(kHalfSize); + AudioBuffer::AlignedInt16Vector channel_2(kHalfSize); + AudioBuffer::AlignedInt16Vector channel_3(kHalfSize); + + // Fill the aligned input buffer. + for (size_t i = 0; i < kQuadSize; ++i) { + interleaved[i] = static_cast<int16_t>(1 + (i % kNumQuadChannels)); + } + + // Clear the output buffers. 
+ for (size_t i = 0; i < kHalfSize; ++i) { + channel_0[i] = static_cast<int16_t>(0); + channel_1[i] = static_cast<int16_t>(0); + channel_2[i] = static_cast<int16_t>(0); + channel_3[i] = static_cast<int16_t>(0); + } + + // Test the case where input is aligned. + DeinterleaveQuad(kHalfSize, interleaved.data(), workspace.data(), + channel_0.data(), channel_1.data(), channel_2.data(), + channel_3.data()); + + for (size_t i = 0; i < kHalfSize; ++i) { + EXPECT_EQ(channel_0[i], kOne); + EXPECT_EQ(channel_1[i], kTwo); + EXPECT_EQ(channel_2[i], kThree); + EXPECT_EQ(channel_3[i], kFour); + } +} + +TEST(SimdUtilsTest, InterleaveQuadFloatTest) { + AudioBuffer interleaved(kNumMonoChannels, kQuadSize); + AudioBuffer workspace(kNumMonoChannels, kPentSize); + AudioBuffer planar(kNumQuadChannels, kHalfSize); + AudioBuffer::Channel& channel_0 = planar[0]; + AudioBuffer::Channel& channel_1 = planar[1]; + AudioBuffer::Channel& channel_2 = planar[2]; + AudioBuffer::Channel& channel_3 = planar[3]; + + // Fill the aligned input buffer. + for (size_t i = 0; i < kHalfSize; ++i) { + channel_0[i] = 1.0f; + channel_1[i] = 2.0f; + channel_2[i] = 3.0f; + channel_3[i] = 4.0f; + } + + InterleaveQuad(kHalfSize, channel_0.begin(), channel_1.begin(), + channel_2.begin(), channel_3.begin(), workspace[0].begin(), + interleaved[0].begin()); + + for (size_t i = 0; i < kQuadSize; ++i) { + const float value = static_cast<float>(1 + (i % kNumQuadChannels)); + EXPECT_FLOAT_EQ(interleaved[0][i], value); + } +} + +TEST(SimdUtilsTest, DeinterleaveQuadFloatTest) { + AudioBuffer interleaved(kNumMonoChannels, kQuadSize); + AudioBuffer workspace(kNumMonoChannels, kPentSize); + AudioBuffer planar(kNumQuadChannels, kHalfSize); + AudioBuffer::Channel& channel_0 = planar[0]; + AudioBuffer::Channel& channel_1 = planar[1]; + AudioBuffer::Channel& channel_2 = planar[2]; + AudioBuffer::Channel& channel_3 = planar[3]; + + // Fill the aligned input buffer. 
+ for (size_t i = 0; i < kQuadSize; ++i) { + interleaved[0][i] = static_cast<float>(1 + (i % kNumQuadChannels)); + } + + // Clear the output buffers. + planar.Clear(); + + // Test the case where input is aligned. + DeinterleaveQuad(kHalfSize, interleaved[0].begin(), workspace[0].begin(), + channel_0.begin(), channel_1.begin(), channel_2.begin(), + channel_3.begin()); + + for (size_t i = 0; i < kHalfSize; ++i) { + EXPECT_FLOAT_EQ(channel_0[i], 1.0f); + EXPECT_FLOAT_EQ(channel_1[i], 2.0f); + EXPECT_FLOAT_EQ(channel_2[i], 3.0f); + EXPECT_FLOAT_EQ(channel_3[i], 4.0f); + } +} + +TEST(SimdUtilsTest, Int16FromFloatTest) { + AudioBuffer float_buffer(kNumMonoChannels, kInputSize); + float_buffer.Clear(); + + AudioBuffer::AlignedInt16Vector int_buffer(kInputSize); + + for (size_t i = 0; i < kInputSize; ++i) { + float_buffer[0][i] = kFloatInput[i]; + } + + Int16FromFloat(kInputSize, &(float_buffer[0][0]), int_buffer.data()); + + for (size_t i = 0; i < kInputSize; ++i) { + EXPECT_NEAR(int_buffer[i], kIntInput[i], kIntEpsilon); + } +} + +TEST(SimdUtilsTest, FloatFromInt16Test) { + AudioBuffer float_buffer(kNumMonoChannels, kInputSize); + float_buffer.Clear(); + + AudioBuffer::AlignedInt16Vector int_buffer(kInputSize); + + for (size_t i = 0; i < kInputSize; ++i) { + int_buffer[i] = static_cast<int16_t>(kIntInput[i]); + } + + FloatFromInt16(kInputSize, int_buffer.data(), &(float_buffer[0][0])); + + for (size_t i = 0; i < kInputSize; i += 2) { + EXPECT_NEAR(float_buffer[0][i], kFloatInput[i], kFloatEpsilon); + } +} + +} // namespace + +} // namespace vraudio diff --git a/src/3rdparty/resonance-audio/resonance_audio/base/source_parameters.h b/src/3rdparty/resonance-audio/resonance_audio/base/source_parameters.h new file mode 100644 index 000000000..bf24a424e --- /dev/null +++ b/src/3rdparty/resonance-audio/resonance_audio/base/source_parameters.h @@ -0,0 +1,101 @@ +/* +Copyright 2018 Google Inc. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS-IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +#ifndef RESONANCE_AUDIO_BASE_SOURCE_PARAMETERS_H_ +#define RESONANCE_AUDIO_BASE_SOURCE_PARAMETERS_H_ + +#include "api/resonance_audio_api.h" + +#include "base/constants_and_types.h" +#include "base/object_transform.h" + +namespace vraudio { + +// Gain attenuation types for audio sources. +enum AttenuationType { + kInput = 0, + kDirect, + kReflections, + kReverb, + kNumAttenuationTypes +}; + +// Parameters describing an audio source. +struct SourceParameters { + // Object transform associated with this buffer. + ObjectTransform object_transform; + + // Angular spread in degrees. Range [0, 360]. + float spread_deg = 0.0f; + + // Source gain factor. + float gain = 1.0f; + + // Source gain attenuation factors to be calculated per each buffer. + float attenuations[kNumAttenuationTypes]; + + // Distance attenuation. Value 1 represents no attenuation should be applied, + // value 0 will fully attenuate the volume. Range [0, 1]. + float distance_attenuation = 1.0f; + + // Distance attenuation rolloff model to use. + DistanceRolloffModel distance_rolloff_model = + DistanceRolloffModel::kLogarithmic; + + // Minimum distance at which to apply distance attenuation. + float minimum_distance = 0.0f; + + // Maximum distance at which to apply distance attenuation. + float maximum_distance = 500.0f; + + // Alpha weighting of source's directivity pattern. 
This sets the balance + // between the dipole and omnidirectional directivity patterns which combine + // to produce the single directivity output value. Range [0, 1], where 0 is + // fully omnidirectional and 1 is fully dipole. + float directivity_alpha = 0.0f; + + // Source directivity order. Increasing this value increases the directivity + // towards the front of the source. Range [1, inf). + float directivity_order = 1.0f; + + // Alpha weighting of listener's directivity pattern. This sets the balance + // between the dipole and omnidirectional pickup patterns which combine to + // produce the single output value. Range [0, 1], where 0 is fully + // omnidirectional and 1 is fully dipole. + float listener_directivity_alpha = 0.0f; + + // Listener directivity order. Increasing this value increases the directivity + // towards the front of the listener. Range [1, inf). + float listener_directivity_order = 1.0f; + + // Occlusion intensity. Value 0 represents no occlusion, values greater than 1 + // represent multiple occlusions. The intensity of each occlusion is scaled + // in range [0, 1]. + float occlusion_intensity = 0.0f; + + // Near field effect gain. Range [0, 9]. + float near_field_gain = 0.0f; + + // Source gain factor for the room effects. + float room_effects_gain = 1.0f; + + // Whether the source uses binaural rendering or stereo panning. + bool enable_hrtf = true; +}; + +} // namespace vraudio + +#endif // RESONANCE_AUDIO_BASE_SOURCE_PARAMETERS_H_ diff --git a/src/3rdparty/resonance-audio/resonance_audio/base/spherical_angle.cc b/src/3rdparty/resonance-audio/resonance_audio/base/spherical_angle.cc new file mode 100644 index 000000000..4b028979d --- /dev/null +++ b/src/3rdparty/resonance-audio/resonance_audio/base/spherical_angle.cc @@ -0,0 +1,78 @@ +/* +Copyright 2018 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS-IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +#include "base/spherical_angle.h" + +#include <cmath> + +#include "base/constants_and_types.h" + +namespace vraudio { + +SphericalAngle::SphericalAngle(float azimuth, float elevation) + : azimuth_(azimuth), elevation_(elevation) {} + +SphericalAngle::SphericalAngle() : SphericalAngle(0.0f, 0.0f) {} + +SphericalAngle::SphericalAngle(const SphericalAngle& other) + : azimuth_(other.azimuth_), elevation_(other.elevation_) {} + +SphericalAngle& SphericalAngle::operator=(const SphericalAngle other) { + if (&other == this) { + return *this; + } + this->azimuth_ = other.azimuth_; + this->elevation_ = other.elevation_; + return *this; +} + +SphericalAngle SphericalAngle::FromWorldPosition( + const WorldPosition& world_position) { + return SphericalAngle( + std::atan2(-world_position[0], -world_position[2]), + std::atan2(world_position[1], + std::sqrt(world_position[0] * world_position[0] + + world_position[2] * world_position[2]))); +} + +SphericalAngle SphericalAngle::FromDegrees(float azimuth_degrees, + float elevation_degrees) { + return SphericalAngle(azimuth_degrees * kRadiansFromDegrees, + elevation_degrees * kRadiansFromDegrees); +} + +SphericalAngle SphericalAngle::FlipAzimuth() const { + return SphericalAngle(-azimuth_, elevation_); +} + +WorldPosition SphericalAngle::GetWorldPositionOnUnitSphere() const { + return WorldPosition(-std::cos(elevation_) * std::sin(azimuth_), + std::sin(elevation_), + -std::cos(elevation_) * std::cos(azimuth_)); +} + +SphericalAngle SphericalAngle::Rotate(const WorldRotation& rotation) const { + const WorldPosition 
original_world_position = GetWorldPositionOnUnitSphere(); + const WorldPosition rotated_world_position = + rotation * original_world_position; + return FromWorldPosition(rotated_world_position); +} + +bool SphericalAngle::operator==(const SphericalAngle& other) const { + return (azimuth_ == other.azimuth_) && (elevation_ == other.elevation_); +} + +} // namespace vraudio diff --git a/src/3rdparty/resonance-audio/resonance_audio/base/spherical_angle.h b/src/3rdparty/resonance-audio/resonance_audio/base/spherical_angle.h new file mode 100644 index 000000000..4559f19d1 --- /dev/null +++ b/src/3rdparty/resonance-audio/resonance_audio/base/spherical_angle.h @@ -0,0 +1,87 @@ +/* +Copyright 2018 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS-IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +#ifndef RESONANCE_AUDIO_BASE_SPHERICAL_ANGLE_H_ +#define RESONANCE_AUDIO_BASE_SPHERICAL_ANGLE_H_ + +#include "base/misc_math.h" + +namespace vraudio { + +// Represents angular position on a sphere in terms of azimuth and elevation. +class SphericalAngle { + public: + // Constructs a spherical angle with the given azimuth and elevation. + SphericalAngle(float azimuth, float elevation); + + // Constructs a default spherical angle (azimuth = 0, elevation = 0). + SphericalAngle(); + + // Constructs a spherical angle from a given one. 
+ SphericalAngle(const SphericalAngle& other); + + SphericalAngle& operator=(const SphericalAngle other); + + // Returns a spherical angle representation of given |world_position| (World + // Space). + // + // @param world_position 3D position in world space. + // @return Spherical angle that represents the |world_position|. + static SphericalAngle FromWorldPosition(const WorldPosition& world_position); + + // Returns a spherical angle from azimuth and elevation in degrees. + static SphericalAngle FromDegrees(float azimuth_degrees, + float elevation_degrees); + + // Returns another spherical angle with the same elevation but the azimuth + // sign flipped. + // + // @return Horizontally flipped version of the spherical angle. + SphericalAngle FlipAzimuth() const; + + // Returns the |WorldPosition| coordinates (World Space) on the unit sphere + // corresponding to this spherical angle. The transformation is + // defined as such: + // x = -cos(elevation) * sin(azimuth) + // y = sin(elevation) + // z = -cos(elevation) * cos(azimuth) + // + // @return 3D position in world space. + WorldPosition GetWorldPositionOnUnitSphere() const; + + // Returns the rotated version of the spherical angle using given + // |WorldRotation|. + // + // @param rotation Rotation to be applied to the spherical angle. + // @return Rotated version of the spherical angle. 
+ SphericalAngle Rotate(const WorldRotation& rotation) const; + + void set_azimuth(float azimuth) { azimuth_ = azimuth; } + void set_elevation(float elevation) { elevation_ = elevation; } + + float azimuth() const { return azimuth_; } + float elevation() const { return elevation_; } + + bool operator==(const SphericalAngle& other) const; + + private: + float azimuth_; + float elevation_; +}; + +} // namespace vraudio + +#endif // RESONANCE_AUDIO_BASE_SPHERICAL_ANGLE_H_ diff --git a/src/3rdparty/resonance-audio/resonance_audio/base/spherical_angle_test.cc b/src/3rdparty/resonance-audio/resonance_audio/base/spherical_angle_test.cc new file mode 100644 index 000000000..49ee16d5d --- /dev/null +++ b/src/3rdparty/resonance-audio/resonance_audio/base/spherical_angle_test.cc @@ -0,0 +1,122 @@ +/* +Copyright 2018 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS-IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +#include "base/spherical_angle.h" + +#include "third_party/googletest/googletest/include/gtest/gtest.h" +#include "base/constants_and_types.h" + +namespace vraudio { + +namespace { + +// Spherical angle to be used in the rotation tests. +const float kAzimuth = 0.0f; +const float kElevation = 0.0f; +const SphericalAngle kSphericalAngle(0.0f, 0.0f); + +// Arbitrary rotation angle to be used in the rotation tests. 
+const float kRotationAngle = 10.0f * kRadiansFromDegrees; + +// Tests that the GetWorldPositionOnUnitSphere() and FromWorldPosition() +// functions act as perfect inverses of one another for angles defined on the +// unit sphere (in this case the vraudio cube speaker layout). +TEST(SphericalAngleTest, CartesianToSphericalAndBackTest) { + // Azimuth and elevation angles of the cubic spherical loudspeaker array. + const std::vector<SphericalAngle> kCubeAngles = { + SphericalAngle::FromDegrees(45.0f, 35.26f), + SphericalAngle::FromDegrees(-45.0f, 35.26f), + SphericalAngle::FromDegrees(-135.0f, 35.26f), + SphericalAngle::FromDegrees(135.0f, 35.26f), + SphericalAngle::FromDegrees(45.0f, -35.26f), + SphericalAngle::FromDegrees(-45.0f, -35.26f), + SphericalAngle::FromDegrees(-135.0f, -35.26f), + SphericalAngle::FromDegrees(135.0f, -35.26f)}; + + for (size_t i = 0; i < kCubeAngles.size(); ++i) { + const WorldPosition position = + kCubeAngles[i].GetWorldPositionOnUnitSphere(); + const SphericalAngle angle = SphericalAngle::FromWorldPosition(position); + EXPECT_EQ(kCubeAngles[i].azimuth(), angle.azimuth()); + EXPECT_EQ(kCubeAngles[i].elevation(), angle.elevation()); + } +} + +// Tests the horizontal angle flip across the median plane. +TEST(SphericalAngleTest, FlipTest) { + const std::vector<SphericalAngle> kTestAngles = { + SphericalAngle::FromDegrees(45.0f, 35.26f), + SphericalAngle::FromDegrees(-15.0f, -10.0f)}; + + for (size_t i = 0; i < kTestAngles.size(); ++i) { + SphericalAngle flipped_spherical_angle = kTestAngles[i].FlipAzimuth(); + + // Check if the flipped spherical angle is correct. + EXPECT_NEAR(kTestAngles[i].azimuth(), -flipped_spherical_angle.azimuth(), + kEpsilonFloat); + EXPECT_NEAR((kTestAngles[i].elevation()), + flipped_spherical_angle.elevation(), kEpsilonFloat); + } +} + +// Tests that the Rotate() function correctly rotates the spherical angle +// against the x axis (right facing). 
+TEST(SphericalAngleTest, RotateXTest) { + const WorldPosition kAxis = {1.0f, 0.0f, 0.0f}; + const WorldRotation kRotation(AngleAxisf(kRotationAngle, kAxis)); + // Rotate against the x axis (right facing). + + const SphericalAngle kXrotatedSphericalAngle = + kSphericalAngle.Rotate(kRotation); + + // Check if the rotated spherical angle is correct. + EXPECT_NEAR(kAzimuth, kXrotatedSphericalAngle.azimuth(), kEpsilonFloat); + EXPECT_NEAR((kElevation + kRotationAngle), + kXrotatedSphericalAngle.elevation(), kEpsilonFloat); +} + +// Tests that the Rotate() function correctly rotates the spherical angle +// against the y axis (upward facing). +TEST(SphericalAngleTest, RotateYTest) { + const WorldPosition kAxis(0.0f, 1.0f, 0.0f); + const WorldRotation kRotation(AngleAxisf(kRotationAngle, kAxis)); + // Rotate against the y axis (upward facing). + const SphericalAngle kYrotatedSphericalAngle = + kSphericalAngle.Rotate(kRotation); + + // Check if the rotated spherical angle is correct. + EXPECT_NEAR((kAzimuth + kRotationAngle), kYrotatedSphericalAngle.azimuth(), + kEpsilonFloat); + EXPECT_NEAR(kElevation, kYrotatedSphericalAngle.elevation(), kEpsilonFloat); +} + +// Tests that the Rotate() function correctly rotates the spherical angle +// against the Z axis (forward facing). +TEST(SphericalAngleTest, RotateZTest) { + const WorldPosition kAxis = {0.0f, 0.0f, 1.0f}; + const WorldRotation kRotation(AngleAxisf(kRotationAngle, kAxis)); + // Rotate against the z axis (forward facing). + const SphericalAngle kZrotatedSphericalAngle = + kSphericalAngle.Rotate(kRotation); + + // Check if the rotated spherical angle is correct. 
+ EXPECT_NEAR(kAzimuth, kZrotatedSphericalAngle.azimuth(), kEpsilonFloat); + EXPECT_NEAR(kElevation, kZrotatedSphericalAngle.elevation(), kEpsilonFloat); +} + +} // namespace + +} // namespace vraudio diff --git a/src/3rdparty/resonance-audio/resonance_audio/base/unique_ptr_wrapper.h b/src/3rdparty/resonance-audio/resonance_audio/base/unique_ptr_wrapper.h new file mode 100644 index 000000000..b8e848135 --- /dev/null +++ b/src/3rdparty/resonance-audio/resonance_audio/base/unique_ptr_wrapper.h @@ -0,0 +1,33 @@ +/* +Copyright 2018 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS-IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +#ifndef RESONANCE_AUDIO_BASE_UNIQUE_PTR_WRAPPER_H_ +#define RESONANCE_AUDIO_BASE_UNIQUE_PTR_WRAPPER_H_ + +#include <memory> + +// Wrapper around std::unique_ptr to enable the binding of unique_ptr buffers to +// std:function and/or lambda function. +template <typename T> +struct UniquePtrWrapper { + UniquePtrWrapper(const UniquePtrWrapper& other) : ptr(std::move(other.ptr)) {} + UniquePtrWrapper(UniquePtrWrapper&& other) : ptr(std::move(other.ptr)) {} + explicit UniquePtrWrapper(std::unique_ptr<T> buffer) + : ptr(std::move(buffer)) {} + mutable std::unique_ptr<T> ptr; +}; + +#endif // RESONANCE_AUDIO_BASE_UNIQUE_PTR_WRAPPER_H_ |