Fix macOS build (following Projucer changes made on Windows, which removed /Applications/JUCE/modules from the header search paths). Move the JUCE headers under source control, so that Windows and macOS can both build against the same version of JUCE. Remove the AUv3 target (I think it's an iOS thing, so it will never work with this macOS fluidsynth dylib).

Alex Birch
2018-06-17 13:34:53 +01:00
parent a2be47c887
commit dff4d13a1d
1563 changed files with 601601 additions and 3466 deletions

View File

@@ -0,0 +1,696 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2017 - ROLI Ltd.
JUCE is an open source library subject to commercial or open-source
licensing.
By using JUCE, you agree to the terms of both the JUCE 5 End-User License
Agreement and JUCE 5 Privacy Policy (both updated and effective as of the
27th April 2017).
End User License Agreement: www.juce.com/juce-5-licence
Privacy Policy: www.juce.com/juce-5-privacy-policy
Or: You may also use this code under the terms of the GPL v3 (see
www.gnu.org/licenses).
JUCE IS PROVIDED "AS IS" WITHOUT ANY WARRANTY, AND ALL WARRANTIES, WHETHER
EXPRESSED OR IMPLIED, INCLUDING MERCHANTABILITY AND FITNESS FOR PURPOSE, ARE
DISCLAIMED.
==============================================================================
*/
namespace juce
{
namespace dsp
{
#ifndef DOXYGEN
namespace SampleTypeHelpers // Internal classes needed for handling sample type classes
{
template <typename Container> struct ElementType { using Type = typename Container::value_type; };
template <> struct ElementType<float> { using Type = float; };
template <> struct ElementType<double> { using Type = double; };
template <> struct ElementType<long double> { using Type = long double; };
}
#endif
//==============================================================================
/**
Minimal and lightweight data-structure which contains a list of pointers to
channels containing some kind of sample data.
This class doesn't own any of the data which it points to: it's simply a view
into data that is owned elsewhere. You can construct one from some raw data
that you've allocated yourself, or give it a HeapBlock to use, or give it
an AudioBuffer which it can refer to, but in all cases the user is
responsible for making sure that the data doesn't get deleted while there's
still an AudioBlock using it.
@tags{DSP}
*/
template <typename SampleType>
class AudioBlock
{
public:
//==============================================================================
using NumericType = typename SampleTypeHelpers::ElementType<SampleType>::Type;
//==============================================================================
/** Create a zero-sized AudioBlock. */
forcedinline AudioBlock() noexcept {}
/** Creates an AudioBlock from a pointer to an array of channels.
AudioBlock neither copies nor owns the memory pointed to by channelData.
Therefore it is the user's responsibility to ensure that the memory is retained
throughout the life-time of the AudioBlock and released when no longer needed.
*/
forcedinline AudioBlock (SampleType* const* channelData,
size_t numberOfChannels, size_t numberOfSamples) noexcept
: channels (channelData),
numChannels (static_cast<ChannelCountType> (numberOfChannels)),
numSamples (numberOfSamples)
{
}
/** Creates an AudioBlock from a pointer to an array of channels.
AudioBlock neither copies nor owns the memory pointed to by channelData.
Therefore it is the user's responsibility to ensure that the memory is retained
throughout the life-time of the AudioBlock and released when no longer needed.
*/
forcedinline AudioBlock (SampleType* const* channelData, size_t numberOfChannels,
size_t startSampleIndex, size_t numberOfSamples) noexcept
: channels (channelData),
numChannels (static_cast<ChannelCountType> (numberOfChannels)),
startSample (startSampleIndex),
numSamples (numberOfSamples)
{
}
/** Allocates a suitable amount of space in a HeapBlock, and initialises this object
to point into it.
The HeapBlock must of course not be freed or re-allocated while this object is still in
use, because it will be referencing its data.
*/
AudioBlock (HeapBlock<char>& heapBlockToUseForAllocation,
size_t numberOfChannels, size_t numberOfSamples,
size_t alignmentInBytes = defaultAlignment) noexcept
: numChannels (static_cast<ChannelCountType> (numberOfChannels)),
numSamples (numberOfSamples)
{
auto roundedUpNumSamples = (numberOfSamples + elementMask) & ~elementMask;
auto channelSize = sizeof (SampleType) * roundedUpNumSamples;
auto channelListBytes = sizeof (SampleType*) * numberOfChannels;
auto extraBytes = alignmentInBytes - 1;
heapBlockToUseForAllocation.malloc (channelListBytes + extraBytes + channelSize * numberOfChannels);
auto* chanArray = reinterpret_cast<SampleType**> (heapBlockToUseForAllocation.getData());
channels = chanArray;
auto* data = reinterpret_cast<SampleType*> (addBytesToPointer (chanArray, channelListBytes));
data = snapPointerToAlignment (data, alignmentInBytes);
for (ChannelCountType i = 0; i < numChannels; ++i)
{
chanArray[i] = data;
data += roundedUpNumSamples;
}
}
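/*  Illustrative usage sketch (hypothetical names, not a definitive recipe):
    allocating a scratch AudioBlock backed by a HeapBlock. The channel count
    and block size shown here are arbitrary examples.

        juce::HeapBlock<char> scratchData;                      // owns the memory
        juce::dsp::AudioBlock<float> scratch (scratchData, 2, 512);
        scratch.clear();                                        // zero the fresh allocation
*/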
/** Creates an AudioBlock that points to the data in an AudioBuffer.
AudioBlock neither copies nor owns the memory pointed to by the buffer.
Therefore it is the user's responsibility to ensure that the buffer is retained
throughout the life-time of the AudioBlock without being modified.
*/
AudioBlock (AudioBuffer<SampleType>& buffer) noexcept
: channels (buffer.getArrayOfWritePointers()),
numChannels (static_cast<ChannelCountType> (buffer.getNumChannels())),
numSamples (static_cast<size_t> (buffer.getNumSamples()))
{
}
/** Creates an AudioBlock that points to the data in an AudioBuffer.
AudioBlock neither copies nor owns the memory pointed to by the buffer.
Therefore it is the user's responsibility to ensure that the buffer is retained
throughout the life-time of the AudioBlock without being modified.
*/
AudioBlock (AudioBuffer<SampleType>& buffer, size_t startSampleIndex) noexcept
: channels (buffer.getArrayOfWritePointers()),
numChannels (static_cast<ChannelCountType> (buffer.getNumChannels())),
startSample (startSampleIndex),
numSamples (static_cast<size_t> (buffer.getNumSamples()))
{
jassert (startSample < numSamples);
}
AudioBlock (const AudioBlock& other) noexcept = default;
AudioBlock& operator= (const AudioBlock& other) noexcept = default;
//==============================================================================
forcedinline size_t getNumSamples() const noexcept { return numSamples; }
forcedinline size_t getNumChannels() const noexcept { return static_cast<size_t> (numChannels); }
/** Returns a raw pointer into one of the channels in this block. */
forcedinline const SampleType* getChannelPointer (size_t channel) const noexcept
{
jassert (channel < numChannels);
jassert (numSamples > 0);
return channels[channel] + startSample;
}
/** Returns a raw pointer into one of the channels in this block. */
forcedinline SampleType* getChannelPointer (size_t channel) noexcept
{
jassert (channel < numChannels);
jassert (numSamples > 0);
return channels[channel] + startSample;
}
/** Returns an AudioBlock that represents one of the channels in this block. */
forcedinline AudioBlock getSingleChannelBlock (size_t channel) const noexcept
{
jassert (channel < numChannels);
return AudioBlock (channels + channel, 1, startSample, numSamples);
}
/** Returns a subset of contiguous channels
@param channelStart First channel of the subset
@param numChannelsToUse Count of channels in the subset
*/
forcedinline AudioBlock getSubsetChannelBlock (size_t channelStart, size_t numChannelsToUse) noexcept
{
jassert (channelStart < numChannels);
jassert ((channelStart + numChannelsToUse) <= numChannels);
return AudioBlock (channels + channelStart, numChannelsToUse, startSample, numSamples);
}
/** Returns a sample from the buffer.
The channel and index are not checked - they are expected to be in-range. If not,
an assertion will be triggered, but in a release build, you're into 'undefined behaviour'
territory.
*/
SampleType getSample (int channel, int sampleIndex) const noexcept
{
jassert (isPositiveAndBelow (channel, numChannels));
jassert (isPositiveAndBelow (sampleIndex, numSamples));
return channels[channel][startSample + sampleIndex];
}
/** Modifies a sample in the buffer.
The channel and index are not checked - they are expected to be in-range. If not,
an assertion will be triggered, but in a release build, you're into 'undefined behaviour'
territory.
*/
void setSample (int destChannel, int destSample, SampleType newValue) noexcept
{
jassert (isPositiveAndBelow (destChannel, numChannels));
jassert (isPositiveAndBelow (destSample, numSamples));
channels[destChannel][startSample + destSample] = newValue;
}
/** Adds a value to a sample in the buffer.
The channel and index are not checked - they are expected to be in-range. If not,
an assertion will be triggered, but in a release build, you're into 'undefined behaviour'
territory.
*/
void addSample (int destChannel, int destSample, SampleType valueToAdd) noexcept
{
jassert (isPositiveAndBelow (destChannel, numChannels));
jassert (isPositiveAndBelow (destSample, numSamples));
channels[destChannel][startSample + destSample] += valueToAdd;
}
//==============================================================================
/** Clear the memory described by this AudioBlock. */
forcedinline AudioBlock& clear() noexcept
{
auto n = static_cast<int> (numSamples * sizeFactor);
for (size_t ch = 0; ch < numChannels; ++ch)
FloatVectorOperations::clear (channelPtr (ch), n);
return *this;
}
/** Fill memory with value. */
forcedinline AudioBlock& JUCE_VECTOR_CALLTYPE fill (SampleType value) noexcept
{
auto n = static_cast<int> (numSamples * sizeFactor);
for (size_t ch = 0; ch < numChannels; ++ch)
FloatVectorOperations::fill (channelPtr (ch), value, n);
return *this;
}
/** Copy the values in src to the receiver. */
forcedinline AudioBlock& copy (AudioBlock src) noexcept
{
auto maxChannels = jmin (src.numChannels, numChannels);
auto n = static_cast<int> (jmin (src.numSamples, numSamples) * sizeFactor);
for (size_t ch = 0; ch < maxChannels; ++ch)
FloatVectorOperations::copy (channelPtr (ch), src.channelPtr (ch), n);
return *this;
}
/** Copy the values from a JUCE AudioBuffer to the receiver.
All indices and sizes are in the receiver's units, i.e. if SampleType is a
SIMDRegister then incrementing srcPos by one will increase the sample position
in the AudioBuffer's units by a factor of SIMDRegister<SampleType>::SIMDNumElements.
*/
forcedinline AudioBlock& copyFrom (const AudioBuffer<NumericType>& src, size_t srcPos = 0, size_t dstPos = 0,
size_t numElements = std::numeric_limits<size_t>::max())
{
auto srclen = static_cast<size_t> (src.getNumSamples()) / sizeFactor;
auto n = static_cast<int> (jmin (srclen - srcPos, numSamples - dstPos, numElements) * sizeFactor);
auto maxChannels = jmin (static_cast<size_t> (src.getNumChannels()), static_cast<size_t> (numChannels));
for (size_t ch = 0; ch < maxChannels; ++ch)
FloatVectorOperations::copy (channelPtr (ch),
src.getReadPointer (static_cast<int> (ch),
static_cast<int> (srcPos * sizeFactor)),
n);
return *this;
}
/** Copy the values from the receiver to a JUCE AudioBuffer.
All indices and sizes are in the receiver's units, i.e. if SampleType is a
SIMDRegister then incrementing dstPos by one will increase the sample position
in the AudioBuffer's units by a factor of SIMDRegister<SampleType>::SIMDNumElements.
*/
forcedinline const AudioBlock& copyTo (AudioBuffer<NumericType>& dst, size_t srcPos = 0, size_t dstPos = 0,
size_t numElements = std::numeric_limits<size_t>::max()) const
{
auto dstlen = static_cast<size_t> (dst.getNumSamples()) / sizeFactor;
auto n = static_cast<int> (jmin (numSamples - srcPos, dstlen - dstPos, numElements) * sizeFactor);
auto maxChannels = jmin (static_cast<size_t> (dst.getNumChannels()), static_cast<size_t> (numChannels));
for (size_t ch = 0; ch < maxChannels; ++ch)
FloatVectorOperations::copy (dst.getWritePointer (static_cast<int> (ch),
static_cast<int> (dstPos * sizeFactor)),
channelPtr (ch), n);
return *this;
}
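/*  Illustrative usage sketch (hypothetical names, not a definitive recipe):
    round-tripping a slice of data between an AudioBuffer<float> and a block.
    'buffer' and 'block' are assumed to have matching channel counts and at
    least 256 samples each.

        block.copyFrom (buffer, 0, 0, 256);   // first 256 samples of each channel
        // ... process 'block' in place ...
        block.copyTo (buffer, 0, 0, 256);
*/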
/** Move memory within the receiver from the position srcPos to the position dstPos.
If numElements is not specified then move will move the maximum amount of memory.
*/
forcedinline AudioBlock& move (size_t srcPos, size_t dstPos,
size_t numElements = std::numeric_limits<size_t>::max()) noexcept
{
jassert (srcPos <= numSamples && dstPos <= numSamples);
auto len = jmin (numSamples - srcPos, numSamples - dstPos, numElements) * sizeof (SampleType);
if (len != 0)
for (size_t ch = 0; ch < numChannels; ++ch)
::memmove (getChannelPointer (ch) + dstPos,
getChannelPointer (ch) + srcPos, len);
return *this;
}
//==============================================================================
/** Return a new AudioBlock pointing to a sub-block inside the receiver. This
function does not copy the memory and you must ensure that the original memory
pointed to by the receiver remains valid throughout the life-time of the
returned sub-block.
@param newOffset The index of an element inside the receiver which
will become the first element of the return value.
@param newLength The number of elements of the newly created sub-block.
*/
inline AudioBlock getSubBlock (size_t newOffset, size_t newLength) const noexcept
{
jassert (newOffset < numSamples);
jassert (newOffset + newLength <= numSamples);
return AudioBlock (channels, numChannels, startSample + newOffset, newLength);
}
/** Return a new AudioBlock pointing to a sub-block inside the receiver. This
function does not copy the memory and you must ensure that the original memory
pointed to by the receiver remains valid throughout the life-time of the
returned sub-block.
@param newOffset The index of an element inside the receiver which
will become the first element of the return value.
The return value will include all subsequent elements
of the receiver.
*/
inline AudioBlock getSubBlock (size_t newOffset) const noexcept
{
return getSubBlock (newOffset, getNumSamples() - newOffset);
}
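/*  Illustrative usage sketch (hypothetical names, not a definitive recipe):
    processing a long block in fixed-size chunks via getSubBlock().
    'processor' stands in for any object exposing process (AudioBlock<float>).

        for (size_t pos = 0; pos < block.getNumSamples();)
        {
            auto len = juce::jmin ((size_t) 32, block.getNumSamples() - pos);
            processor.process (block.getSubBlock (pos, len));
            pos += len;
        }
*/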
//==============================================================================
/** Adds a fixed value to the receiver. */
forcedinline AudioBlock& JUCE_VECTOR_CALLTYPE add (SampleType value) noexcept
{
auto n = static_cast<int> (numSamples * sizeFactor);
for (size_t ch = 0; ch < numChannels; ++ch)
FloatVectorOperations::add (channelPtr (ch), value, n);
return *this;
}
/** Adds the source values to the receiver. */
forcedinline AudioBlock& add (AudioBlock src) noexcept
{
jassert (numChannels == src.numChannels);
auto n = static_cast<int> (jmin (numSamples, src.numSamples) * sizeFactor);
for (size_t ch = 0; ch < numChannels; ++ch)
FloatVectorOperations::add (channelPtr (ch), src.channelPtr (ch), n);
return *this;
}
/** Adds a fixed value to each source value and stores it in the destination array of the receiver. */
forcedinline AudioBlock& JUCE_VECTOR_CALLTYPE add (AudioBlock src, SampleType value) noexcept
{
jassert (numChannels == src.numChannels);
auto n = static_cast<int> (jmin (numSamples, src.numSamples) * sizeFactor);
for (size_t ch = 0; ch < numChannels; ++ch)
FloatVectorOperations::add (channelPtr (ch), src.channelPtr (ch), value, n);
return *this;
}
/** Adds each source1 value to the corresponding source2 value and stores it in the destination array of the receiver. */
forcedinline AudioBlock& add (AudioBlock src1, AudioBlock src2) noexcept
{
jassert (numChannels == src1.numChannels && src1.numChannels == src2.numChannels);
auto n = static_cast<int> (jmin (numSamples, src1.numSamples, src2.numSamples) * sizeFactor);
for (size_t ch = 0; ch < numChannels; ++ch)
FloatVectorOperations::add (channelPtr (ch), src1.channelPtr (ch), src2.getChannelPointer (ch), n);
return *this;
}
/** Subtracts a fixed value from the receiver. */
forcedinline AudioBlock& JUCE_VECTOR_CALLTYPE subtract (SampleType value) noexcept
{
return add (value * static_cast<SampleType> (-1.0));
}
/** Subtracts the source values from the receiver. */
forcedinline AudioBlock& subtract (AudioBlock src) noexcept
{
jassert (numChannels == src.numChannels);
auto n = static_cast<int> (jmin (numSamples, src.numSamples) * sizeFactor);
for (size_t ch = 0; ch < numChannels; ++ch)
FloatVectorOperations::subtract (channelPtr (ch), src.channelPtr (ch), n);
return *this;
}
/** Subtracts a fixed value from each source value and stores it in the destination array of the receiver. */
forcedinline AudioBlock& JUCE_VECTOR_CALLTYPE subtract (AudioBlock src, SampleType value) noexcept
{
return add (src, static_cast<SampleType> (-1.0) * value);
}
/** Subtracts each source2 value from the corresponding source1 value and stores it in the destination array of the receiver. */
forcedinline AudioBlock& subtract (AudioBlock src1, AudioBlock src2) noexcept
{
jassert (numChannels == src1.numChannels && src1.numChannels == src2.numChannels);
auto n = static_cast<int> (jmin (numSamples, src1.numSamples, src2.numSamples) * sizeFactor);
for (size_t ch = 0; ch < numChannels; ++ch)
FloatVectorOperations::subtract (channelPtr (ch), src1.channelPtr (ch), src2.channelPtr (ch), n);
return *this;
}
/** Multiplies the receiver by a fixed value. */
forcedinline AudioBlock& JUCE_VECTOR_CALLTYPE multiply (SampleType value) noexcept
{
auto n = static_cast<int> (numSamples * sizeFactor);
for (size_t ch = 0; ch < numChannels; ++ch)
FloatVectorOperations::multiply (channelPtr (ch), value, n);
return *this;
}
/** Multiplies the receiver by the source values. */
forcedinline AudioBlock& multiply (AudioBlock src) noexcept
{
jassert (numChannels == src.numChannels);
auto n = static_cast<int> (jmin (numSamples, src.numSamples) * sizeFactor);
for (size_t ch = 0; ch < numChannels; ++ch)
FloatVectorOperations::multiply (channelPtr (ch), src.channelPtr (ch), n);
return *this;
}
/** Multiplies each source value by a fixed value and stores the result in the receiver. */
forcedinline AudioBlock& JUCE_VECTOR_CALLTYPE multiply (AudioBlock src, SampleType value) noexcept
{
jassert (numChannels == src.numChannels);
auto n = static_cast<int> (jmin (numSamples, src.numSamples) * sizeFactor);
for (size_t ch = 0; ch < numChannels; ++ch)
FloatVectorOperations::multiply (channelPtr (ch), src.channelPtr (ch), value, n);
return *this;
}
/** Multiplies each source1 value by the corresponding source2 value and stores the result in the receiver. */
forcedinline AudioBlock& multiply (AudioBlock src1, AudioBlock src2) noexcept
{
jassert (numChannels == src1.numChannels && src1.numChannels == src2.numChannels);
auto n = static_cast<int> (jmin (numSamples, src1.numSamples, src2.numSamples) * sizeFactor);
for (size_t ch = 0; ch < numChannels; ++ch)
FloatVectorOperations::multiply (channelPtr (ch), src1.channelPtr (ch), src2.channelPtr (ch), n);
return *this;
}
/** Multiplies all channels of the AudioBlock by a smoothly changing value and stores the result in the receiver. */
AudioBlock& multiply (LinearSmoothedValue<SampleType>& value) noexcept
{
if (! value.isSmoothing())
{
*this *= value.getTargetValue();
}
else
{
for (size_t i = 0; i < numSamples; ++i)
{
const auto scaler = value.getNextValue();
for (size_t ch = 0; ch < numChannels; ++ch)
channelPtr (ch)[i] *= scaler;
}
}
return *this;
}
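/*  Illustrative usage sketch (hypothetical names, not a definitive recipe):
    applying a click-free gain ramp with a LinearSmoothedValue. 'sampleRate'
    is assumed to come from prepareToPlay(), and the setter name used here
    (setValue) may differ between JUCE versions.

        juce::LinearSmoothedValue<float> gain;
        gain.reset (sampleRate, 0.05);   // 50 ms ramp
        gain.setValue (0.5f);
        block.multiply (gain);           // ramps towards 0.5 across the block
*/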
/** Multiplies all channels of the source by a smoothly changing value and stores them in the receiver. */
AudioBlock& multiply (AudioBlock src, LinearSmoothedValue<SampleType>& value) noexcept
{
jassert (numChannels == src.numChannels);
if (! value.isSmoothing())
{
copy (src);
}
else
{
auto n = jmin (numSamples, src.numSamples) * sizeFactor;
for (size_t i = 0; i < n; ++i)
{
const auto scaler = value.getNextValue();
for (size_t ch = 0; ch < numChannels; ++ch)
channelPtr (ch)[i] = scaler * src.getChannelPointer (ch)[i];
}
}
return *this;
}
/** Multiplies each value in src with factor and adds the result to the receiver. */
forcedinline AudioBlock& JUCE_VECTOR_CALLTYPE addWithMultiply (AudioBlock src, SampleType factor) noexcept
{
jassert (numChannels == src.numChannels);
auto n = static_cast<int> (jmin (numSamples, src.numSamples) * sizeFactor);
for (size_t ch = 0; ch < numChannels; ++ch)
FloatVectorOperations::addWithMultiply (channelPtr (ch), src.channelPtr (ch), factor, n);
return *this;
}
/** Multiplies each value in src1 with the corresponding value in src2 and adds the result to the receiver. */
forcedinline AudioBlock& addWithMultiply (AudioBlock src1, AudioBlock src2) noexcept
{
jassert (numChannels == src1.numChannels && src1.numChannels == src2.numChannels);
auto n = static_cast<int> (jmin (numSamples, src1.numSamples, src2.numSamples) * sizeFactor);
for (size_t ch = 0; ch < numChannels; ++ch)
FloatVectorOperations::addWithMultiply (channelPtr (ch), src1.channelPtr (ch), src2.channelPtr (ch), n);
return *this;
}
/** Negates each value of the receiver. */
forcedinline AudioBlock& negate() noexcept
{
return multiply (static_cast<SampleType> (-1.0));
}
/** Negates each value of source and stores it in the receiver. */
forcedinline AudioBlock& replaceWithNegativeOf (AudioBlock src) noexcept
{
jassert (numChannels == src.numChannels);
auto n = static_cast<int> (jmin (numSamples, src.numSamples) * sizeFactor);
for (size_t ch = 0; ch < numChannels; ++ch)
FloatVectorOperations::negate (channelPtr (ch), src.channelPtr (ch), n);
return *this;
}
/** Takes the absolute value of each element of src and stores it inside the receiver. */
forcedinline AudioBlock& replaceWithAbsoluteValueOf (AudioBlock src) noexcept
{
jassert (numChannels == src.numChannels);
auto n = static_cast<int> (jmin (numSamples, src.numSamples) * sizeFactor);
for (size_t ch = 0; ch < numChannels; ++ch)
FloatVectorOperations::abs (channelPtr (ch), src.channelPtr (ch), n);
return *this;
}
/** Each element of receiver will be the minimum of the corresponding element of the source arrays. */
forcedinline AudioBlock& min (AudioBlock src1, AudioBlock src2) noexcept
{
jassert (numChannels == src1.numChannels && src1.numChannels == src2.numChannels);
auto n = static_cast<int> (jmin (src1.numSamples, src2.numSamples, numSamples) * sizeFactor);
for (size_t ch = 0; ch < numChannels; ++ch)
FloatVectorOperations::min (channelPtr (ch), src1.channelPtr (ch), src2.channelPtr (ch), n);
return *this;
}
/** Each element of the receiver will be the maximum of the corresponding element of the source arrays. */
forcedinline AudioBlock& max (AudioBlock src1, AudioBlock src2) noexcept
{
jassert (numChannels == src1.numChannels && src1.numChannels == src2.numChannels);
auto n = static_cast<int> (jmin (src1.numSamples, src2.numSamples, numSamples) * sizeFactor);
for (size_t ch = 0; ch < numChannels; ++ch)
FloatVectorOperations::max (channelPtr (ch), src1.channelPtr (ch), src2.channelPtr (ch), n);
return *this;
}
/** Finds the minimum and maximum value of the buffer. */
forcedinline Range<NumericType> findMinAndMax() const noexcept
{
if (numChannels == 0)
return {};
auto n = static_cast<int> (numSamples * sizeFactor);
auto minmax = FloatVectorOperations::findMinAndMax (channelPtr (0), n);
for (size_t ch = 1; ch < numChannels; ++ch)
minmax = minmax.getUnionWith (FloatVectorOperations::findMinAndMax (channelPtr (ch), n));
return minmax;
}
//==============================================================================
// convenient operator wrappers
forcedinline AudioBlock& JUCE_VECTOR_CALLTYPE operator+= (SampleType src) noexcept { return add (src); }
forcedinline AudioBlock& operator+= (AudioBlock src) noexcept { return add (src); }
forcedinline AudioBlock& JUCE_VECTOR_CALLTYPE operator-= (SampleType src) noexcept { return subtract (src); }
forcedinline AudioBlock& operator-= (AudioBlock src) noexcept { return subtract (src); }
forcedinline AudioBlock& JUCE_VECTOR_CALLTYPE operator*= (SampleType src) noexcept { return multiply (src); }
forcedinline AudioBlock& operator*= (AudioBlock src) noexcept { return multiply (src); }
forcedinline AudioBlock& operator*= (LinearSmoothedValue<SampleType>& value) noexcept { return multiply (value); }
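/*  Illustrative usage sketch (hypothetical names, not a definitive recipe):
    the wrappers above allow terse in-place arithmetic. 'block' and
    'otherBlock' are assumed to have matching sizes.

        block *= 0.5f;        // halve every sample
        block += otherBlock;  // mix another block in
*/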
//==============================================================================
// This class can only be used with floating point types
static_assert (std::is_same<SampleType, float>::value
|| std::is_same<SampleType, double>::value
#if JUCE_USE_SIMD
|| std::is_same<SampleType, SIMDRegister<float>>::value
|| std::is_same<SampleType, SIMDRegister<double>>::value
#endif
, "AudioBlock only supports single or double precision floating point types");
//==============================================================================
/** Applies a function to each value in an input block, putting the result into an output block.
The function supplied must take a SampleType as its parameter, and return a SampleType.
The two blocks must have the same number of channels and samples.
*/
template <typename FunctionType>
static void process (AudioBlock inBlock, AudioBlock outBlock, FunctionType&& function)
{
auto len = inBlock.getNumSamples();
auto numChans = inBlock.getNumChannels();
jassert (len == outBlock.getNumSamples());
jassert (numChans == outBlock.getNumChannels());
for (ChannelCountType c = 0; c < numChans; ++c)
{
auto* src = inBlock.getChannelPointer (c);
auto* dst = outBlock.getChannelPointer (c);
for (size_t i = 0; i < len; ++i)
dst[i] = function (src[i]);
}
}
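/*  Illustrative usage sketch (hypothetical names, not a definitive recipe):
    applying a per-sample waveshaper with process(). Using the same block for
    input and output is fine, since the sizes trivially match.

        juce::dsp::AudioBlock<float>::process (block, block,
                                               [] (float x) { return std::tanh (x); });
*/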
private:
//==============================================================================
NumericType* channelPtr (size_t ch) noexcept { return reinterpret_cast<NumericType*> (getChannelPointer (ch)); }
const NumericType* channelPtr (size_t ch) const noexcept { return reinterpret_cast<const NumericType*> (getChannelPointer (ch)); }
//==============================================================================
using ChannelCountType = unsigned int;
//==============================================================================
static constexpr size_t sizeFactor = sizeof (SampleType) / sizeof (NumericType);
static constexpr size_t elementMask = sizeFactor - 1;
static constexpr size_t byteMask = (sizeFactor * sizeof (NumericType)) - 1;
#if JUCE_USE_SIMD
static constexpr size_t defaultAlignment = sizeof (SIMDRegister<NumericType>);
#else
static constexpr size_t defaultAlignment = sizeof (NumericType);
#endif
SampleType* const* channels;
ChannelCountType numChannels = 0;
size_t startSample = 0, numSamples = 0;
};
} // namespace dsp
} // namespace juce

View File

@@ -0,0 +1,399 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2017 - ROLI Ltd.
JUCE is an open source library subject to commercial or open-source
licensing.
By using JUCE, you agree to the terms of both the JUCE 5 End-User License
Agreement and JUCE 5 Privacy Policy (both updated and effective as of the
27th April 2017).
End User License Agreement: www.juce.com/juce-5-licence
Privacy Policy: www.juce.com/juce-5-privacy-policy
Or: You may also use this code under the terms of the GPL v3 (see
www.gnu.org/licenses).
JUCE IS PROVIDED "AS IS" WITHOUT ANY WARRANTY, AND ALL WARRANTIES, WHETHER
EXPRESSED OR IMPLIED, INCLUDING MERCHANTABILITY AND FITNESS FOR PURPOSE, ARE
DISCLAIMED.
==============================================================================
*/
namespace juce
{
namespace dsp
{
#ifndef DOXYGEN
// This class is needed internally.
template <typename Scalar>
struct CmplxSIMDOps;
#endif
//==============================================================================
/**
A wrapper around the platform's native SIMD register type.
This class is only available on SIMD machines. Use JUCE_USE_SIMD to query
if SIMD is available for your system.
SIMDRegister<Type> is a templated class representing the native
vectorized version of FloatingType. SIMDRegister supports all numerical
primitive types as well as std::complex<float> and std::complex<double>,
and supports most operations of the corresponding primitive
type. Additionally, SIMDRegister can be accessed like an array to extract
the individual elements.
If you are using SIMDRegister as a pointer, then you must ensure that the
memory is sufficiently aligned for SIMD vector operations. Failing to do so
will result in crashes or very slow code. Use SIMDRegister::isSIMDAligned
to query whether a pointer is sufficiently aligned for SIMD vector operations.
Note that using SIMDRegister without enabling optimizations will result
in code with very poor performance.
@tags{DSP}
*/
template <typename Type>
struct SIMDRegister
{
//==============================================================================
/** The type that represents the individual constituents of the SIMD Register */
using ElementType = Type;
/** STL compatible value_type definition (same as ElementType). */
using value_type = ElementType;
/** The corresponding primitive integer type, for example, this will be int32_t
if type is a float. */
using MaskType = typename SIMDInternal::MaskTypeFor<ElementType>::type;
//==============================================================================
// Here are some types which are needed internally
/** The native primitive type (used internally). */
using PrimitiveType = typename SIMDInternal::PrimitiveType<ElementType>::type;
/** The native operations for this platform and type combination (used internally) */
using NativeOps = SIMDNativeOps<PrimitiveType>;
/** The native type (used internally). */
using vSIMDType = typename NativeOps::vSIMDType;
/** The corresponding integer SIMDRegister type (used internally). */
using vMaskType = SIMDRegister<MaskType>;
/** The internal native type for the corresponding mask type (used internally). */
using vMaskSIMDType = typename vMaskType::vSIMDType;
/** Wrapper for operations which need to be handled differently for complex
and scalar types (used internally). */
using CmplxOps = CmplxSIMDOps<ElementType>;
/** Type which is returned when using the subscript operator. The returned type
should be used just like the type ElementType. */
struct ElementAccess;
//==============================================================================
/** The size in bytes of this register. */
static constexpr size_t SIMDRegisterSize = sizeof (vSIMDType);
/** The number of elements that this vector can hold. */
static constexpr size_t SIMDNumElements = SIMDRegisterSize / sizeof (ElementType);
vSIMDType value;
/** Default constructor. */
inline SIMDRegister() noexcept {}
/** Constructs an object from the native SIMD type. */
inline SIMDRegister (vSIMDType a) noexcept : value (a) {}
/** Constructs an object from a scalar type by broadcasting it to all elements. */
inline SIMDRegister (Type s) noexcept { *this = s; }
/** Destructor. */
inline ~SIMDRegister() noexcept {}
//==============================================================================
/** Returns the number of elements in this vector. */
static constexpr size_t size() noexcept { return SIMDNumElements; }
//==============================================================================
/** Creates a new SIMDRegister from the corresponding scalar primitive.
The scalar is extended to all elements of the vector. */
inline static SIMDRegister JUCE_VECTOR_CALLTYPE expand (ElementType s) noexcept { return {CmplxOps::expand (s)}; }
/** Creates a new SIMDRegister from the internal SIMD type (for example
__mm128 for single-precision floating point on SSE architectures). */
inline static SIMDRegister JUCE_VECTOR_CALLTYPE fromNative (vSIMDType a) noexcept { return {a}; }
/** Creates a new SIMDRegister from the first SIMDNumElements of a scalar array. */
inline static SIMDRegister JUCE_VECTOR_CALLTYPE fromRawArray (const ElementType* a) noexcept
{
jassert (isSIMDAligned (a));
return {CmplxOps::load (a)};
}
/** Copies the elements of the SIMDRegister to a scalar array in memory. */
inline void JUCE_VECTOR_CALLTYPE copyToRawArray (ElementType* a) const noexcept
{
jassert (isSIMDAligned (a));
CmplxOps::store (value, a);
}
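/*  Illustrative usage sketch (hypothetical names, not a definitive recipe):
    moving data in and out of a register via an aligned scalar array. alignas
    (C++11) is used here for brevity; compiler-specific attributes would work
    equally well.

        alignas (juce::dsp::SIMDRegister<float>::SIMDRegisterSize)
            float data[juce::dsp::SIMDRegister<float>::SIMDNumElements] = {};

        auto r = juce::dsp::SIMDRegister<float>::fromRawArray (data);
        r += 1.0f;
        r.copyToRawArray (data);   // every element of data is now 1.0f
*/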
//==============================================================================
/** Returns the idx-th element of the receiver. Note that this does not check if idx
is larger than the native register size. */
inline ElementType JUCE_VECTOR_CALLTYPE get (size_t idx) const noexcept
{
jassert (idx < SIMDNumElements);
return CmplxOps::get (value, idx);
}
/** Sets the idx-th element of the receiver. Note that this does not check if idx
is larger than the native register size. */
inline void JUCE_VECTOR_CALLTYPE set (size_t idx, ElementType v) noexcept
{
jassert (idx < SIMDNumElements);
value = CmplxOps::set (value, idx, v);
}
//==============================================================================
/** Returns the idx-th element of the receiver. Note that this does not check if idx
is larger than the native register size. */
inline ElementType JUCE_VECTOR_CALLTYPE operator[] (size_t idx) const noexcept
{
return get (idx);
}
/** Returns the idx-th element of the receiver. Note that this does not check if idx
is larger than the native register size. */
inline ElementAccess JUCE_VECTOR_CALLTYPE operator[] (size_t idx) noexcept
{
jassert (idx < SIMDNumElements);
return ElementAccess (*this, idx);
}
//==============================================================================
/** Adds another SIMDRegister to the receiver. */
inline SIMDRegister& JUCE_VECTOR_CALLTYPE operator+= (SIMDRegister v) noexcept { value = NativeOps::add (value, v.value); return *this; }
/** Subtracts another SIMDRegister from the receiver. */
inline SIMDRegister& JUCE_VECTOR_CALLTYPE operator-= (SIMDRegister v) noexcept { value = NativeOps::sub (value, v.value); return *this; }
/** Multiplies the receiver by another SIMDRegister. */
inline SIMDRegister& JUCE_VECTOR_CALLTYPE operator*= (SIMDRegister v) noexcept { value = CmplxOps::mul (value, v.value); return *this; }
//==============================================================================
/** Broadcasts the scalar to all elements of the receiver. */
inline SIMDRegister& JUCE_VECTOR_CALLTYPE operator= (ElementType s) noexcept { value = CmplxOps::expand (s); return *this; }
/** Adds a scalar to the receiver. */
inline SIMDRegister& JUCE_VECTOR_CALLTYPE operator+= (ElementType s) noexcept { value = NativeOps::add (value, CmplxOps::expand (s)); return *this; }
/** Subtracts a scalar from the receiver. */
inline SIMDRegister& JUCE_VECTOR_CALLTYPE operator-= (ElementType s) noexcept { value = NativeOps::sub (value, CmplxOps::expand (s)); return *this; }
/** Multiplies the receiver by a scalar. */
inline SIMDRegister& JUCE_VECTOR_CALLTYPE operator*= (ElementType s) noexcept { value = CmplxOps::mul (value, CmplxOps::expand (s)); return *this; }
//==============================================================================
/** Bit-and the receiver with SIMDRegister v and store the result in the receiver. */
inline SIMDRegister& JUCE_VECTOR_CALLTYPE operator&= (vMaskType v) noexcept { value = NativeOps::bit_and (value, toVecType (v.value)); return *this; }
/** Bit-or the receiver with SIMDRegister v and store the result in the receiver. */
inline SIMDRegister& JUCE_VECTOR_CALLTYPE operator|= (vMaskType v) noexcept { value = NativeOps::bit_or (value, toVecType (v.value)); return *this; }
/** Bit-xor the receiver with SIMDRegister v and store the result in the receiver. */
inline SIMDRegister& JUCE_VECTOR_CALLTYPE operator^= (vMaskType v) noexcept { value = NativeOps::bit_xor (value, toVecType (v.value)); return *this; }
//==============================================================================
/** Bit-and each element of the receiver with the scalar s and store the result in the receiver.*/
inline SIMDRegister& JUCE_VECTOR_CALLTYPE operator&= (MaskType s) noexcept { value = NativeOps::bit_and (value, toVecType (s)); return *this; }
/** Bit-or each element of the receiver with the scalar s and store the result in the receiver.*/
inline SIMDRegister& JUCE_VECTOR_CALLTYPE operator|= (MaskType s) noexcept { value = NativeOps::bit_or (value, toVecType (s)); return *this; }
/** Bit-xor each element of the receiver with the scalar s and store the result in the receiver.*/
inline SIMDRegister& JUCE_VECTOR_CALLTYPE operator^= (MaskType s) noexcept { value = NativeOps::bit_xor (value, toVecType (s)); return *this; }
//==============================================================================
/** Returns the sum of the receiver and v.*/
inline SIMDRegister JUCE_VECTOR_CALLTYPE operator+ (SIMDRegister v) const noexcept { return { NativeOps::add (value, v.value) }; }
/** Returns the difference of the receiver and v.*/
inline SIMDRegister JUCE_VECTOR_CALLTYPE operator- (SIMDRegister v) const noexcept { return { NativeOps::sub (value, v.value) }; }
/** Returns the product of the receiver and v.*/
inline SIMDRegister JUCE_VECTOR_CALLTYPE operator* (SIMDRegister v) const noexcept { return { CmplxOps::mul (value, v.value) }; }
//==============================================================================
/** Returns a vector where each element is the sum of the corresponding element in the receiver and the scalar s.*/
inline SIMDRegister JUCE_VECTOR_CALLTYPE operator+ (ElementType s) const noexcept { return { NativeOps::add (value, CmplxOps::expand (s)) }; }
/** Returns a vector where each element is the difference of the corresponding element in the receiver and the scalar s.*/
inline SIMDRegister JUCE_VECTOR_CALLTYPE operator- (ElementType s) const noexcept { return { NativeOps::sub (value, CmplxOps::expand (s)) }; }
/** Returns a vector where each element is the product of the corresponding element in the receiver and the scalar s.*/
inline SIMDRegister JUCE_VECTOR_CALLTYPE operator* (ElementType s) const noexcept { return { CmplxOps::mul (value, CmplxOps::expand (s)) }; }
//==============================================================================
/** Returns the bit-and of the receiver and v. */
inline SIMDRegister JUCE_VECTOR_CALLTYPE operator& (vMaskType v) const noexcept { return { NativeOps::bit_and (value, toVecType (v.value)) }; }
/** Returns the bit-or of the receiver and v. */
inline SIMDRegister JUCE_VECTOR_CALLTYPE operator| (vMaskType v) const noexcept { return { NativeOps::bit_or (value, toVecType (v.value)) }; }
/** Returns the bit-xor of the receiver and v. */
inline SIMDRegister JUCE_VECTOR_CALLTYPE operator^ (vMaskType v) const noexcept { return { NativeOps::bit_xor (value, toVecType (v.value)) }; }
/** Returns a vector where each element is the bit-inverted value of the corresponding element in the receiver.*/
inline SIMDRegister JUCE_VECTOR_CALLTYPE operator~() const noexcept { return { NativeOps::bit_not (value) }; }
//==============================================================================
/** Returns a vector where each element is the bit-and'd value of the corresponding element in the receiver and the scalar s.*/
inline SIMDRegister JUCE_VECTOR_CALLTYPE operator& (MaskType s) const noexcept { return { NativeOps::bit_and (value, toVecType (s)) }; }
/** Returns a vector where each element is the bit-or'd value of the corresponding element in the receiver and the scalar s.*/
inline SIMDRegister JUCE_VECTOR_CALLTYPE operator| (MaskType s) const noexcept { return { NativeOps::bit_or (value, toVecType (s)) }; }
/** Returns a vector where each element is the bit-xor'd value of the corresponding element in the receiver and the scalar s.*/
inline SIMDRegister JUCE_VECTOR_CALLTYPE operator^ (MaskType s) const noexcept { return { NativeOps::bit_xor (value, toVecType (s)) }; }
//==============================================================================
/** Returns true if all element-wise comparisons return true. */
inline bool JUCE_VECTOR_CALLTYPE operator== (SIMDRegister other) const noexcept { return NativeOps::allEqual (value, other.value); }
/** Returns true if any element-wise comparisons return false. */
inline bool JUCE_VECTOR_CALLTYPE operator!= (SIMDRegister other) const noexcept { return ! (*this == other); }
/** Returns true if all elements are equal to the scalar. */
inline bool JUCE_VECTOR_CALLTYPE operator== (Type s) const noexcept { return *this == SIMDRegister::expand (s); }
/** Returns true if any elements are not equal to the scalar. */
inline bool JUCE_VECTOR_CALLTYPE operator!= (Type s) const noexcept { return ! (*this == s); }
//==============================================================================
/** Returns a SIMDRegister of the corresponding integral type where each element has each bit set
if the corresponding element of a is equal to the corresponding element of b, or zero otherwise.
The result can then be used in bit operations defined above to avoid branches in vector SIMD code. */
static inline vMaskType JUCE_VECTOR_CALLTYPE equal (SIMDRegister a, SIMDRegister b) noexcept { return toMaskType (NativeOps::equal (a.value, b.value)); }
/** Returns a SIMDRegister of the corresponding integral type where each element has each bit set
if the corresponding element of a is not equal to the corresponding element of b, or zero otherwise.
The result can then be used in bit operations defined above to avoid branches in vector SIMD code. */
static inline vMaskType JUCE_VECTOR_CALLTYPE notEqual (SIMDRegister a, SIMDRegister b) noexcept { return toMaskType (NativeOps::notEqual (a.value, b.value)); }
/** Returns a SIMDRegister of the corresponding integral type where each element has each bit set
if the corresponding element of a is less than the corresponding element of b, or zero otherwise.
The result can then be used in bit operations defined above to avoid branches in vector SIMD code. */
static inline vMaskType JUCE_VECTOR_CALLTYPE lessThan (SIMDRegister a, SIMDRegister b) noexcept { return toMaskType (NativeOps::greaterThan (b.value, a.value)); }
/** Returns a SIMDRegister of the corresponding integral type where each element has each bit set
if the corresponding element of a is less than or equal to the corresponding element of b, or zero otherwise.
The result can then be used in bit operations defined above to avoid branches in vector SIMD code. */
static inline vMaskType JUCE_VECTOR_CALLTYPE lessThanOrEqual (SIMDRegister a, SIMDRegister b) noexcept { return toMaskType (NativeOps::greaterThanOrEqual (b.value, a.value)); }
/** Returns a SIMDRegister of the corresponding integral type where each element has each bit set
if the corresponding element of a is greater than the corresponding element of b, or zero otherwise.
The result can then be used in bit operations defined above to avoid branches in vector SIMD code. */
static inline vMaskType JUCE_VECTOR_CALLTYPE greaterThan (SIMDRegister a, SIMDRegister b) noexcept { return toMaskType (NativeOps::greaterThan (a.value, b.value)); }
/** Returns a SIMDRegister of the corresponding integral type where each element has each bit set
if the corresponding element of a is greater than or equal to the corresponding element of b, or zero otherwise.
The result can then be used in bit operations defined above to avoid branches in vector SIMD code. */
static inline vMaskType JUCE_VECTOR_CALLTYPE greaterThanOrEqual (SIMDRegister a, SIMDRegister b) noexcept { return toMaskType (NativeOps::greaterThanOrEqual (a.value, b.value)); }
//==============================================================================
/** Returns a new vector where each element is the minimum of the corresponding element of a and b. */
static inline SIMDRegister JUCE_VECTOR_CALLTYPE min (SIMDRegister a, SIMDRegister b) noexcept { return { NativeOps::min (a.value, b.value) }; }
/** Returns a new vector where each element is the maximum of the corresponding element of a and b. */
static inline SIMDRegister JUCE_VECTOR_CALLTYPE max (SIMDRegister a, SIMDRegister b) noexcept { return { NativeOps::max (a.value, b.value) }; }
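/*  Illustrative usage sketch (hypothetical names, not a definitive recipe):
    a branch-free clamp built from the min/max helpers above. 'v' is a
    register assumed to exist in the caller.

        using Reg = juce::dsp::SIMDRegister<float>;
        auto clamped = Reg::min (Reg::max (v, Reg::expand (-1.0f)), Reg::expand (1.0f));
*/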
//==============================================================================
/** Multiplies b and c and adds the result to a. */
static inline SIMDRegister JUCE_VECTOR_CALLTYPE multiplyAdd (SIMDRegister a, const SIMDRegister b, SIMDRegister c) noexcept
{
return { CmplxOps::muladd (a.value, b.value, c.value) };
}
//==============================================================================
/** Returns a scalar which is the sum of all elements of the receiver. */
inline ElementType sum() const noexcept { return CmplxOps::sum (value); }
//==============================================================================
/** Checks if the given pointer is sufficiently aligned for using SIMD operations. */
static inline bool isSIMDAligned (const ElementType* ptr) noexcept
{
uintptr_t bitmask = SIMDRegisterSize - 1;
return (reinterpret_cast<uintptr_t> (ptr) & bitmask) == 0;
}
/** Returns the next position in memory where isSIMDAligned returns true.
If the current position in memory is already aligned then this method
will simply return the pointer.
*/
static inline ElementType* getNextSIMDAlignedPtr (ElementType* ptr) noexcept
{
return snapPointerToAlignment (ptr, SIMDRegisterSize);
}
#ifndef DOXYGEN
static inline const ElementType* getNextSIMDAlignedPtr (const ElementType* ptr) noexcept
{
return snapPointerToAlignment (ptr, SIMDRegisterSize);
}
#endif
private:
static inline vMaskType JUCE_VECTOR_CALLTYPE toMaskType (vSIMDType a) noexcept
{
union
{
vSIMDType in;
vMaskSIMDType out;
} u;
u.in = a;
return vMaskType::fromNative (u.out);
}
static inline vSIMDType JUCE_VECTOR_CALLTYPE toVecType (vMaskSIMDType a) noexcept
{
union
{
vMaskSIMDType in;
vSIMDType out;
} u;
u.in = a;
return u.out;
}
static inline vSIMDType JUCE_VECTOR_CALLTYPE toVecType (MaskType a) noexcept
{
union
{
vMaskSIMDType in;
vSIMDType out;
} u;
u.in = CmplxSIMDOps<MaskType>::expand (a);
return u.out;
}
};
} // namespace dsp
} // namespace juce
#ifndef DOXYGEN
#include "juce_SIMDRegister_Impl.h"
#endif

View File

@@ -0,0 +1,178 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2017 - ROLI Ltd.
JUCE is an open source library subject to commercial or open-source
licensing.
By using JUCE, you agree to the terms of both the JUCE 5 End-User License
Agreement and JUCE 5 Privacy Policy (both updated and effective as of the
27th April 2017).
End User License Agreement: www.juce.com/juce-5-licence
Privacy Policy: www.juce.com/juce-5-privacy-policy
Or: You may also use this code under the terms of the GPL v3 (see
www.gnu.org/licenses).
JUCE IS PROVIDED "AS IS" WITHOUT ANY WARRANTY, AND ALL WARRANTIES, WHETHER
EXPRESSED OR IMPLIED, INCLUDING MERCHANTABILITY AND FITNESS FOR PURPOSE, ARE
DISCLAIMED.
==============================================================================
*/
namespace juce
{
namespace dsp
{
//==============================================================================
template <typename Type>
struct SIMDRegister<Type>::ElementAccess
{
operator Type() const { return simd.get (idx); }
ElementAccess& operator= (Type scalar) noexcept { simd.set (idx, scalar); return *this; }
ElementAccess& operator= (ElementAccess& o) noexcept { return operator= ((Type) o); }
private:
friend struct SIMDRegister;
ElementAccess (SIMDRegister& owner, size_t index) noexcept : simd (owner), idx (index) {}
SIMDRegister& simd;
size_t idx;
};
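/*  Illustrative usage sketch (hypothetical names, not a definitive recipe):
    ElementAccess lets operator[] appear on either side of an assignment.

        juce::dsp::SIMDRegister<float> r (0.0f);
        r[0] = 3.0f;        // write through ElementAccess
        float x = r[0];     // read back; x == 3.0f
*/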
#ifndef DOXYGEN
//==============================================================================
/* This class is used internally by SIMDRegister to abstract away the differences
between operations on complex and on pure floating-point types. */
// the pure floating-point version
template <typename Scalar>
struct CmplxSIMDOps
{
typedef typename SIMDNativeOps<Scalar>::vSIMDType vSIMDType;
static inline vSIMDType JUCE_VECTOR_CALLTYPE load (const Scalar* a) noexcept
{
return SIMDNativeOps<Scalar>::load (a);
}
static inline void JUCE_VECTOR_CALLTYPE store (vSIMDType value, Scalar* dest) noexcept
{
SIMDNativeOps<Scalar>::store (value, dest);
}
static inline vSIMDType JUCE_VECTOR_CALLTYPE expand (Scalar s) noexcept
{
return SIMDNativeOps<Scalar>::expand (s);
}
static inline Scalar JUCE_VECTOR_CALLTYPE get (vSIMDType v, std::size_t i) noexcept
{
return SIMDNativeOps<Scalar>::get (v, i);
}
static inline vSIMDType JUCE_VECTOR_CALLTYPE set (vSIMDType v, std::size_t i, Scalar s) noexcept
{
return SIMDNativeOps<Scalar>::set (v, i, s);
}
static inline Scalar JUCE_VECTOR_CALLTYPE sum (vSIMDType a) noexcept
{
return SIMDNativeOps<Scalar>::sum (a);
}
static inline vSIMDType JUCE_VECTOR_CALLTYPE mul (vSIMDType a, vSIMDType b) noexcept
{
return SIMDNativeOps<Scalar>::mul (a, b);
}
static inline vSIMDType JUCE_VECTOR_CALLTYPE muladd (vSIMDType a, vSIMDType b, vSIMDType c) noexcept
{
return SIMDNativeOps<Scalar>::multiplyAdd (a, b, c);
}
};
// The pure complex version
template <typename Scalar>
struct CmplxSIMDOps<std::complex<Scalar>>
{
typedef typename SIMDNativeOps<Scalar>::vSIMDType vSIMDType;
static inline vSIMDType JUCE_VECTOR_CALLTYPE load (const std::complex<Scalar>* a) noexcept
{
return SIMDNativeOps<Scalar>::load (reinterpret_cast<const Scalar*> (a));
}
static inline void JUCE_VECTOR_CALLTYPE store (vSIMDType value, std::complex<Scalar>* dest) noexcept
{
SIMDNativeOps<Scalar>::store (value, reinterpret_cast<Scalar*> (dest));
}
static inline vSIMDType JUCE_VECTOR_CALLTYPE expand (std::complex<Scalar> s) noexcept
{
const int n = sizeof (vSIMDType) / sizeof (Scalar);
union
{
vSIMDType v;
Scalar floats[n];
} u;
for (int i = 0; i < n; ++i)
u.floats[i] = (i & 1) == 0 ? s.real() : s.imag();
return u.v;
}
static inline std::complex<Scalar> JUCE_VECTOR_CALLTYPE get (vSIMDType v, std::size_t i) noexcept
{
auto j = i << 1;
return std::complex<Scalar> (SIMDNativeOps<Scalar>::get (v, j), SIMDNativeOps<Scalar>::get (v, j + 1));
}
static inline vSIMDType JUCE_VECTOR_CALLTYPE set (vSIMDType v, std::size_t i, std::complex<Scalar> s) noexcept
{
auto j = i << 1;
return SIMDNativeOps<Scalar>::set (SIMDNativeOps<Scalar>::set (v, j, s.real()), j + 1, s.imag());
}
static inline std::complex<Scalar> JUCE_VECTOR_CALLTYPE sum (vSIMDType a) noexcept
{
vSIMDType result = SIMDNativeOps<Scalar>::oddevensum (a);
auto* ptr = reinterpret_cast<const Scalar*> (&result);
return std::complex<Scalar> (ptr[0], ptr[1]);
}
static inline vSIMDType JUCE_VECTOR_CALLTYPE mul (vSIMDType a, vSIMDType b) noexcept
{
return SIMDNativeOps<Scalar>::cmplxmul (a, b);
}
static inline vSIMDType JUCE_VECTOR_CALLTYPE muladd (vSIMDType a, vSIMDType b, vSIMDType c) noexcept
{
return SIMDNativeOps<Scalar>::add (a, SIMDNativeOps<Scalar>::cmplxmul (b, c));
}
};
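/*  Illustrative usage sketch (hypothetical names, not a definitive recipe):
    the specialisation above is what makes element-wise complex arithmetic
    work on a register.

        juce::dsp::SIMDRegister<std::complex<float>> c (std::complex<float> (1.0f, -1.0f));
        c *= std::complex<float> (0.0f, 1.0f);   // multiply every element by i
*/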
#endif
//==============================================================================
namespace util
{
template <typename Type>
inline void snapToZero (SIMDRegister<Type>&) noexcept {}
}
} // namespace dsp
// Extend some commonly used global functions to SIMDRegister types
template <typename Type>
inline dsp::SIMDRegister<Type> JUCE_VECTOR_CALLTYPE jmin (dsp::SIMDRegister<Type> a, dsp::SIMDRegister<Type> b) { return dsp::SIMDRegister<Type>::min (a, b); }
template <typename Type>
inline dsp::SIMDRegister<Type> JUCE_VECTOR_CALLTYPE jmax (dsp::SIMDRegister<Type> a, dsp::SIMDRegister<Type> b) { return dsp::SIMDRegister<Type>::max (a, b); }
} // namespace juce

View File

@@ -0,0 +1,842 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2017 - ROLI Ltd.
JUCE is an open source library subject to commercial or open-source
licensing.
By using JUCE, you agree to the terms of both the JUCE 5 End-User License
Agreement and JUCE 5 Privacy Policy (both updated and effective as of the
27th April 2017).
End User License Agreement: www.juce.com/juce-5-licence
Privacy Policy: www.juce.com/juce-5-privacy-policy
Or: You may also use this code under the terms of the GPL v3 (see
www.gnu.org/licenses).
JUCE IS PROVIDED "AS IS" WITHOUT ANY WARRANTY, AND ALL WARRANTIES, WHETHER
EXPRESSED OR IMPLIED, INCLUDING MERCHANTABILITY AND FITNESS FOR PURPOSE, ARE
DISCLAIMED.
==============================================================================
*/
namespace juce
{
namespace dsp
{
namespace SIMDRegister_test_internal
{
template <typename type, typename = void> struct RandomPrimitive {};
template <typename type>
struct RandomPrimitive<type, typename std::enable_if<std::is_floating_point<type>::value>::type>
{
static type next (Random& random)
{
return static_cast<type> (std::is_signed<type>::value ? (random.nextFloat() * 16.0) - 8.0
: (random.nextFloat() * 8.0));
}
};
template <typename type>
struct RandomPrimitive<type, typename std::enable_if<std::is_integral<type>::value>::type>
{
static type next (Random& random)
{
return static_cast<type> (random.nextInt64());
}
};
template <typename type> struct RandomValue { static type next (Random& random) { return RandomPrimitive<type>::next (random); } };
template <typename type>
struct RandomValue<std::complex<type>>
{
static std::complex<type> next (Random& random)
{
return {RandomPrimitive<type>::next (random), RandomPrimitive<type>::next (random)};
}
};
template <typename type>
struct VecFiller
{
static void fill (type* dst, const int size, Random& random)
{
for (int i = 0; i < size; ++i)
dst[i] = RandomValue<type>::next (random);
}
};
// We need to specialise for complex types: otherwise GCC 6 gives us an
// internal compiler error (ICE), after which the compiler segfaults.
template <typename type>
struct VecFiller<std::complex<type>>
{
static void fill (std::complex<type>* dst, const int size, Random& random)
{
for (int i = 0; i < size; ++i)
dst[i] = std::complex<type> (RandomValue<type>::next (random), RandomValue<type>::next (random));
}
};
template <typename type>
struct VecFiller<SIMDRegister<type>>
{
static SIMDRegister<type> fill(Random& random)
{
constexpr int size = (int) SIMDRegister<type>::SIMDNumElements;
#ifdef _MSC_VER
__declspec(align(sizeof (SIMDRegister<type>))) type elements[size];
#else
type elements[size] __attribute__((aligned(sizeof (SIMDRegister<type>))));
#endif
VecFiller<type>::fill (elements, size, random);
return SIMDRegister<type>::fromRawArray (elements);
}
};
// Avoid a Visual Studio warning
template <typename type>
static type safeAbs (type a)
{
return static_cast<type> (std::abs (static_cast<double> (a)));
}
template <typename type>
static type safeAbs (std::complex<type> a)
{
return std::abs (a);
}
template <typename type>
static double difference (type a)
{
return static_cast<double> (safeAbs (a));
}
template <typename type>
static double difference (type a, type b)
{
return difference (a - b);
}
}
// These tests need to be run on all platforms supported by JUCE, as the
// SIMD code is highly platform dependent.
class SIMDRegisterUnitTests : public UnitTest
{
public:
SIMDRegisterUnitTests() : UnitTest ("SIMDRegister UnitTests", "DSP") {}
//==============================================================================
// Some helper classes
template <typename type>
static bool allValuesEqualTo (const SIMDRegister<type>& vec, const type scalar)
{
#ifdef _MSC_VER
__declspec(align(sizeof (SIMDRegister<type>))) type elements[SIMDRegister<type>::SIMDNumElements];
#else
type elements[SIMDRegister<type>::SIMDNumElements] __attribute__((aligned(sizeof (SIMDRegister<type>))));
#endif
vec.copyToRawArray (elements);
// as we do not want to rely on the access operator, we copy the vector to a plain array
for (size_t i = 0; i < SIMDRegister<type>::SIMDNumElements; ++i)
if (elements[i] != scalar) return false;
return true;
}
template <typename type>
static bool vecEqualToArray (const SIMDRegister<type>& vec, const type* array)
{
HeapBlock<type> vecElementsStorage (SIMDRegister<type>::SIMDNumElements * 2);
auto* ptr = SIMDRegister<type>::getNextSIMDAlignedPtr (vecElementsStorage.getData());
vec.copyToRawArray (ptr);
for (size_t i = 0; i < SIMDRegister<type>::SIMDNumElements; ++i)
{
double delta = SIMDRegister_test_internal::difference (ptr[i], array[i]);
if (delta > 1e-4)
{
DBG ("a: " << SIMDRegister_test_internal::difference (ptr[i]) << " b: " << SIMDRegister_test_internal::difference (array[i]) << " difference: " << delta);
return false;
}
}
return true;
}
template <typename type>
static void copy (SIMDRegister<type>& vec, const type* ptr)
{
if (SIMDRegister<type>::isSIMDAligned (ptr))
{
vec = SIMDRegister<type>::fromRawArray (ptr);
}
else
{
for (size_t i = 0; i < SIMDRegister<type>::SIMDNumElements; ++i)
vec[i] = ptr[i];
}
}
//==============================================================================
// Some useful operations to test
struct Addition
{
template <typename typeOne, typename typeTwo>
static void inplace (typeOne& a, const typeTwo& b)
{
a += b;
}
template <typename typeOne, typename typeTwo>
static typeOne outofplace (const typeOne& a, const typeTwo& b)
{
return a + b;
}
};
struct Subtraction
{
template <typename typeOne, typename typeTwo>
static void inplace (typeOne& a, const typeTwo& b)
{
a -= b;
}
template <typename typeOne, typename typeTwo>
static typeOne outofplace (const typeOne& a, const typeTwo& b)
{
return a - b;
}
};
struct Multiplication
{
template <typename typeOne, typename typeTwo>
static void inplace (typeOne& a, const typeTwo& b)
{
a *= b;
}
template <typename typeOne, typename typeTwo>
static typeOne outofplace (const typeOne& a, const typeTwo& b)
{
return a * b;
}
};
struct BitAND
{
template <typename typeOne, typename typeTwo>
static void inplace (typeOne& a, const typeTwo& b)
{
a &= b;
}
template <typename typeOne, typename typeTwo>
static typeOne outofplace (const typeOne& a, const typeTwo& b)
{
return a & b;
}
};
struct BitOR
{
template <typename typeOne, typename typeTwo>
static void inplace (typeOne& a, const typeTwo& b)
{
a |= b;
}
template <typename typeOne, typename typeTwo>
static typeOne outofplace (const typeOne& a, const typeTwo& b)
{
return a | b;
}
};
struct BitXOR
{
template <typename typeOne, typename typeTwo>
static void inplace (typeOne& a, const typeTwo& b)
{
a ^= b;
}
template <typename typeOne, typename typeTwo>
static typeOne outofplace (const typeOne& a, const typeTwo& b)
{
return a ^ b;
}
};
//==============================================================================
// the individual tests
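// Checks expand(), fromRawArray() and that copy-construction yields an
// independent register.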
struct InitializationTest
{
template <typename type>
static void run (UnitTest& u, Random& random)
{
u.expect (allValuesEqualTo<type> (SIMDRegister<type>::expand (static_cast<type> (23)), 23));
{
#ifdef _MSC_VER
__declspec(align(sizeof (SIMDRegister<type>))) type elements[SIMDRegister<type>::SIMDNumElements];
#else
type elements[SIMDRegister<type>::SIMDNumElements] __attribute__((aligned(sizeof (SIMDRegister<type>))));
#endif
SIMDRegister_test_internal::VecFiller<type>::fill (elements, SIMDRegister<type>::SIMDNumElements, random);
SIMDRegister<type> a (SIMDRegister<type>::fromRawArray (elements));
u.expect (vecEqualToArray (a, elements));
SIMDRegister<type> b (a);
a *= static_cast<type> (2);
u.expect (vecEqualToArray (b, elements));
}
}
};
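// Checks reading and writing individual elements through the const and
// non-const access operators.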
struct AccessTest
{
template <typename type>
static void run (UnitTest& u, Random& random)
{
// set-up
SIMDRegister<type> a;
type array [SIMDRegister<type>::SIMDNumElements];
SIMDRegister_test_internal::VecFiller<type>::fill (array, SIMDRegister<type>::SIMDNumElements, random);
// Test non-const access operator
for (size_t i = 0; i < SIMDRegister<type>::SIMDNumElements; ++i)
a[i] = array[i];
u.expect (vecEqualToArray (a, array));
// Test const access operator
const SIMDRegister<type>& b = a;
for (size_t i = 0; i < SIMDRegister<type>::SIMDNumElements; ++i)
u.expect (b[i] == array[i]);
}
};
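// Runs the given arithmetic operation in-place and out-of-place, with vector
// and scalar operands, and compares the results against a scalar reference loop.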
template <class Operation>
struct OperatorTests
{
template <typename type>
static void run (UnitTest& u, Random& random)
{
for (int n = 0; n < 100; ++n)
{
// set-up
SIMDRegister<type> a (static_cast<type> (0));
SIMDRegister<type> b (static_cast<type> (0));
SIMDRegister<type> c (static_cast<type> (0));
type array_a [SIMDRegister<type>::SIMDNumElements];
type array_b [SIMDRegister<type>::SIMDNumElements];
type array_c [SIMDRegister<type>::SIMDNumElements];
SIMDRegister_test_internal::VecFiller<type>::fill (array_a, SIMDRegister<type>::SIMDNumElements, random);
SIMDRegister_test_internal::VecFiller<type>::fill (array_b, SIMDRegister<type>::SIMDNumElements, random);
SIMDRegister_test_internal::VecFiller<type>::fill (array_c, SIMDRegister<type>::SIMDNumElements, random);
copy (a, array_a); copy (b, array_b); copy (c, array_c);
// test in-place with both params being vectors
for (size_t i = 0; i < SIMDRegister<type>::SIMDNumElements; ++i)
Operation::template inplace<type, type> (array_a[i], array_b[i]);
Operation::template inplace<SIMDRegister<type>, SIMDRegister<type>> (a, b);
u.expect (vecEqualToArray (a, array_a));
u.expect (vecEqualToArray (b, array_b));
SIMDRegister_test_internal::VecFiller<type>::fill (array_a, SIMDRegister<type>::SIMDNumElements, random);
SIMDRegister_test_internal::VecFiller<type>::fill (array_b, SIMDRegister<type>::SIMDNumElements, random);
SIMDRegister_test_internal::VecFiller<type>::fill (array_c, SIMDRegister<type>::SIMDNumElements, random);
copy (a, array_a); copy (b, array_b); copy (c, array_c);
// test in-place with one param being scalar
for (size_t i = 0; i < SIMDRegister<type>::SIMDNumElements; ++i)
Operation::template inplace<type, type> (array_b[i], static_cast<type> (2));
Operation::template inplace<SIMDRegister<type>, type> (b, 2);
u.expect (vecEqualToArray (a, array_a));
u.expect (vecEqualToArray (b, array_b));
// set-up again
SIMDRegister_test_internal::VecFiller<type>::fill (array_a, SIMDRegister<type>::SIMDNumElements, random);
SIMDRegister_test_internal::VecFiller<type>::fill (array_b, SIMDRegister<type>::SIMDNumElements, random);
SIMDRegister_test_internal::VecFiller<type>::fill (array_c, SIMDRegister<type>::SIMDNumElements, random);
copy (a, array_a); copy (b, array_b); copy (c, array_c);
// test out-of-place with both params being vectors
for (size_t i = 0; i < SIMDRegister<type>::SIMDNumElements; ++i)
array_c[i] = Operation::template outofplace<type, type> (array_a[i], array_b[i]);
c = Operation::template outofplace<SIMDRegister<type>, SIMDRegister<type>> (a, b);
u.expect (vecEqualToArray (a, array_a));
u.expect (vecEqualToArray (b, array_b));
u.expect (vecEqualToArray (c, array_c));
// test out-of-place with one param being scalar
for (size_t i = 0; i < SIMDRegister<type>::SIMDNumElements; ++i)
array_c[i] = Operation::template outofplace<type, type> (array_b[i], static_cast<type> (2));
c = Operation::template outofplace<SIMDRegister<type>, type> (b, 2);
u.expect (vecEqualToArray (a, array_a));
u.expect (vecEqualToArray (b, array_b));
u.expect (vecEqualToArray (c, array_c));
}
}
};
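// Applies the given bitwise operation to the register and to its integer mask
// view, comparing in-place and out-of-place forms against a scalar reference loop.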
template <class Operation>
struct BitOperatorTests
{
template <typename type>
static void run (UnitTest& u, Random& random)
{
typedef typename SIMDRegister<type>::vMaskType vMaskType;
typedef typename SIMDRegister<type>::MaskType MaskType;
for (int n = 0; n < 100; ++n)
{
// Check flipping the sign bit and using the register as a union
{
type array_a [SIMDRegister<type>::SIMDNumElements];
union ConversionUnion
{
inline ConversionUnion() : floatVersion (static_cast<type> (0)) {}
inline ~ConversionUnion() {}
SIMDRegister<type> floatVersion;
vMaskType intVersion;
} a, b;
vMaskType bitmask = vMaskType::expand (static_cast<MaskType> (1) << (sizeof (MaskType) - 1));
SIMDRegister_test_internal::VecFiller<type>::fill (array_a, SIMDRegister<type>::SIMDNumElements, random);
copy (a.floatVersion, array_a);
copy (b.floatVersion, array_a);
Operation::template inplace<SIMDRegister<type>, vMaskType> (a.floatVersion, bitmask);
Operation::template inplace<vMaskType, vMaskType> (b.intVersion, bitmask);
#ifdef _MSC_VER
__declspec(align(sizeof (SIMDRegister<type>))) type elements[SIMDRegister<type>::SIMDNumElements];
#else
type elements[SIMDRegister<type>::SIMDNumElements] __attribute__((aligned(sizeof (SIMDRegister<type>))));
#endif
b.floatVersion.copyToRawArray (elements);
u.expect (vecEqualToArray (a.floatVersion, elements));
}
// set-up
SIMDRegister<type> a, c;
vMaskType b;
MaskType array_a [SIMDRegister<MaskType>::SIMDNumElements];
MaskType array_b [SIMDRegister<MaskType>::SIMDNumElements];
MaskType array_c [SIMDRegister<MaskType>::SIMDNumElements];
type float_a [SIMDRegister<type>::SIMDNumElements];
type float_c [SIMDRegister<type>::SIMDNumElements];
SIMDRegister_test_internal::VecFiller<type>::fill (float_a, SIMDRegister<type>::SIMDNumElements, random);
SIMDRegister_test_internal::VecFiller<MaskType>::fill (array_b, SIMDRegister<MaskType>::SIMDNumElements, random);
SIMDRegister_test_internal::VecFiller<type>::fill (float_c, SIMDRegister<type>::SIMDNumElements, random);
memcpy (array_a, float_a, sizeof (type) * SIMDRegister<type>::SIMDNumElements);
memcpy (array_c, float_c, sizeof (type) * SIMDRegister<type>::SIMDNumElements);
copy (a, float_a); copy (b, array_b); copy (c, float_c);
// test in-place with both params being vectors
for (size_t i = 0; i < SIMDRegister<MaskType>::SIMDNumElements; ++i)
Operation::template inplace<MaskType, MaskType> (array_a[i], array_b[i]);
memcpy (float_a, array_a, sizeof (type) * SIMDRegister<type>::SIMDNumElements);
Operation::template inplace<SIMDRegister<type>, vMaskType> (a, b);
u.expect (vecEqualToArray (a, float_a));
u.expect (vecEqualToArray (b, array_b));
SIMDRegister_test_internal::VecFiller<type>::fill (float_a, SIMDRegister<type>::SIMDNumElements, random);
SIMDRegister_test_internal::VecFiller<MaskType>::fill (array_b, SIMDRegister<MaskType>::SIMDNumElements, random);
SIMDRegister_test_internal::VecFiller<type>::fill (float_c, SIMDRegister<type>::SIMDNumElements, random);
memcpy (array_a, float_a, sizeof (type) * SIMDRegister<type>::SIMDNumElements);
memcpy (array_c, float_c, sizeof (type) * SIMDRegister<type>::SIMDNumElements);
copy (a, float_a); copy (b, array_b); copy (c, float_c);
// test in-place with one param being scalar
for (size_t i = 0; i < SIMDRegister<MaskType>::SIMDNumElements; ++i)
Operation::template inplace<MaskType, MaskType> (array_a[i], static_cast<MaskType> (9));
memcpy (float_a, array_a, sizeof (type) * SIMDRegister<type>::SIMDNumElements);
Operation::template inplace<SIMDRegister<type>, MaskType> (a, static_cast<MaskType> (9));
u.expect (vecEqualToArray (a, float_a));
u.expect (vecEqualToArray (b, array_b));
// set-up again
SIMDRegister_test_internal::VecFiller<type>::fill (float_a, SIMDRegister<type>::SIMDNumElements, random);
SIMDRegister_test_internal::VecFiller<MaskType>::fill (array_b, SIMDRegister<MaskType>::SIMDNumElements, random);
SIMDRegister_test_internal::VecFiller<type>::fill (float_c, SIMDRegister<type>::SIMDNumElements, random);
memcpy (array_a, float_a, sizeof (type) * SIMDRegister<type>::SIMDNumElements);
memcpy (array_c, float_c, sizeof (type) * SIMDRegister<type>::SIMDNumElements);
copy (a, float_a); copy (b, array_b); copy (c, float_c);
// test out-of-place with both params being vectors
for (size_t i = 0; i < SIMDRegister<MaskType>::SIMDNumElements; ++i)
{
array_c[i] =
Operation::template outofplace<MaskType, MaskType> (array_a[i], array_b[i]);
}
memcpy (float_a, array_a, sizeof (type) * SIMDRegister<type>::SIMDNumElements);
memcpy (float_c, array_c, sizeof (type) * SIMDRegister<type>::SIMDNumElements);
c = Operation::template outofplace<SIMDRegister<type>, vMaskType> (a, b);
u.expect (vecEqualToArray (a, float_a));
u.expect (vecEqualToArray (b, array_b));
u.expect (vecEqualToArray (c, float_c));
// test out-of-place with one param being scalar
for (size_t i = 0; i < SIMDRegister<MaskType>::SIMDNumElements; ++i)
array_c[i] = Operation::template outofplace<MaskType, MaskType> (array_a[i], static_cast<MaskType> (9));
memcpy (float_a, array_a, sizeof (type) * SIMDRegister<type>::SIMDNumElements);
memcpy (float_c, array_c, sizeof (type) * SIMDRegister<type>::SIMDNumElements);
c = Operation::template outofplace<SIMDRegister<type>, MaskType> (a, static_cast<MaskType> (9));
u.expect (vecEqualToArray (a, float_a));
u.expect (vecEqualToArray (b, array_b));
u.expect (vecEqualToArray (c, float_c));
}
}
};
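// Checks that the element-wise comparisons produce all-ones masks where the
// condition holds, and that whole-register ==/!= behave as expected.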
struct CheckComparisonOps
{
template <typename type>
static void run (UnitTest& u, Random& random)
{
typedef typename SIMDRegister<type>::vMaskType vMaskType;
typedef typename SIMDRegister<type>::MaskType MaskType;
for (int i = 0; i < 100; ++i)
{
// set-up
type array_a [SIMDRegister<type>::SIMDNumElements];
type array_b [SIMDRegister<type>::SIMDNumElements];
MaskType array_eq [SIMDRegister<type>::SIMDNumElements];
MaskType array_neq [SIMDRegister<type>::SIMDNumElements];
MaskType array_lt [SIMDRegister<type>::SIMDNumElements];
MaskType array_le [SIMDRegister<type>::SIMDNumElements];
MaskType array_gt [SIMDRegister<type>::SIMDNumElements];
MaskType array_ge [SIMDRegister<type>::SIMDNumElements];
SIMDRegister_test_internal::VecFiller<type>::fill (array_a, SIMDRegister<type>::SIMDNumElements, random);
SIMDRegister_test_internal::VecFiller<type>::fill (array_b, SIMDRegister<type>::SIMDNumElements, random);
// do check
for (size_t j = 0; j < SIMDRegister<type>::SIMDNumElements; ++j)
{
array_eq [j] = (array_a[j] == array_b[j]) ? static_cast<MaskType> (-1) : 0;
array_neq [j] = (array_a[j] != array_b[j]) ? static_cast<MaskType> (-1) : 0;
array_lt [j] = (array_a[j] < array_b[j]) ? static_cast<MaskType> (-1) : 0;
array_le [j] = (array_a[j] <= array_b[j]) ? static_cast<MaskType> (-1) : 0;
array_gt [j] = (array_a[j] > array_b[j]) ? static_cast<MaskType> (-1) : 0;
array_ge [j] = (array_a[j] >= array_b[j]) ? static_cast<MaskType> (-1) : 0;
}
SIMDRegister<type> a (static_cast<type> (0));
SIMDRegister<type> b (static_cast<type> (0));
vMaskType eq, neq, lt, le, gt, ge;
copy (a, array_a);
copy (b, array_b);
eq = SIMDRegister<type>::equal (a, b);
neq = SIMDRegister<type>::notEqual (a, b);
lt = SIMDRegister<type>::lessThan (a, b);
le = SIMDRegister<type>::lessThanOrEqual (a, b);
gt = SIMDRegister<type>::greaterThan (a, b);
ge = SIMDRegister<type>::greaterThanOrEqual (a, b);
u.expect (vecEqualToArray (eq, array_eq ));
u.expect (vecEqualToArray (neq, array_neq));
u.expect (vecEqualToArray (lt, array_lt ));
u.expect (vecEqualToArray (le, array_le ));
u.expect (vecEqualToArray (gt, array_gt ));
u.expect (vecEqualToArray (ge, array_ge ));
do
{
SIMDRegister_test_internal::VecFiller<type>::fill (array_a, SIMDRegister<type>::SIMDNumElements, random);
SIMDRegister_test_internal::VecFiller<type>::fill (array_b, SIMDRegister<type>::SIMDNumElements, random);
} while (std::equal (array_a, array_a + SIMDRegister<type>::SIMDNumElements, array_b));
copy (a, array_a);
copy (b, array_b);
u.expect (a != b);
u.expect (b != a);
u.expect (! (a == b));
u.expect (! (b == a));
SIMDRegister_test_internal::VecFiller<type>::fill (array_a, SIMDRegister<type>::SIMDNumElements, random);
copy (a, array_a);
copy (b, array_a);
u.expect (a == b);
u.expect (b == a);
u.expect (! (a != b));
u.expect (! (b != a));
type scalar = a[0];
a = SIMDRegister<type>::expand (scalar);
u.expect (a == scalar);
u.expect (! (a != scalar));
scalar--;
u.expect (a != scalar);
u.expect (! (a == scalar));
}
}
};
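// Checks multiplyAdd: d = a + (b * c), element by element.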
struct CheckMultiplyAdd
{
template <typename type>
static void run (UnitTest& u, Random& random)
{
// set-up
type array_a [SIMDRegister<type>::SIMDNumElements];
type array_b [SIMDRegister<type>::SIMDNumElements];
type array_c [SIMDRegister<type>::SIMDNumElements];
type array_d [SIMDRegister<type>::SIMDNumElements];
SIMDRegister_test_internal::VecFiller<type>::fill (array_a, SIMDRegister<type>::SIMDNumElements, random);
SIMDRegister_test_internal::VecFiller<type>::fill (array_b, SIMDRegister<type>::SIMDNumElements, random);
SIMDRegister_test_internal::VecFiller<type>::fill (array_c, SIMDRegister<type>::SIMDNumElements, random);
SIMDRegister_test_internal::VecFiller<type>::fill (array_d, SIMDRegister<type>::SIMDNumElements, random);
// check
for (size_t i = 0; i < SIMDRegister<type>::SIMDNumElements; ++i)
array_d[i] = array_a[i] + (array_b[i] * array_c[i]);
SIMDRegister<type> a, b, c, d;
copy (a, array_a);
copy (b, array_b);
copy (c, array_c);
d = SIMDRegister<type>::multiplyAdd (a, b, c);
u.expect (vecEqualToArray (d, array_d));
}
};
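// Checks element-wise min/max via jmin/jmax and SIMDRegister::min/max.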
struct CheckMinMax
{
template <typename type>
static void run (UnitTest& u, Random& random)
{
for (int i = 0; i < 100; ++i)
{
type array_a [SIMDRegister<type>::SIMDNumElements];
type array_b [SIMDRegister<type>::SIMDNumElements];
type array_min [SIMDRegister<type>::SIMDNumElements];
type array_max [SIMDRegister<type>::SIMDNumElements];
for (size_t j = 0; j < SIMDRegister<type>::SIMDNumElements; ++j)
{
array_a[j] = static_cast<type> (random.nextInt (127));
array_b[j] = static_cast<type> (random.nextInt (127));
}
for (size_t j = 0; j < SIMDRegister<type>::SIMDNumElements; ++j)
{
array_min[j] = (array_a[j] < array_b[j]) ? array_a[j] : array_b[j];
array_max[j] = (array_a[j] > array_b[j]) ? array_a[j] : array_b[j];
}
SIMDRegister<type> a (static_cast<type> (0));
SIMDRegister<type> b (static_cast<type> (0));
SIMDRegister<type> vMin (static_cast<type> (0));
SIMDRegister<type> vMax (static_cast<type> (0));
copy (a, array_a);
copy (b, array_b);
vMin = jmin (a, b);
vMax = jmax (a, b);
u.expect (vecEqualToArray (vMin, array_min));
u.expect (vecEqualToArray (vMax, array_max));
copy (vMin, array_a);
copy (vMax, array_a);
vMin = SIMDRegister<type>::min (a, b);
vMax = SIMDRegister<type>::max (a, b);
u.expect (vecEqualToArray (vMin, array_min));
u.expect (vecEqualToArray (vMax, array_max));
}
}
};
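// Checks the horizontal sum of all elements against a scalar accumulation.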
struct CheckSum
{
template <typename type>
static void run (UnitTest& u, Random& random)
{
type array [SIMDRegister<type>::SIMDNumElements];
type sumCheck = 0;
SIMDRegister_test_internal::VecFiller<type>::fill (array, SIMDRegister<type>::SIMDNumElements, random);
for (size_t j = 0; j < SIMDRegister<type>::SIMDNumElements; ++j)
{
sumCheck += array[j];
}
SIMDRegister<type> a;
copy (a, array);
u.expect (SIMDRegister_test_internal::difference (sumCheck, a.sum()) < 1e-4);
}
};
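// Checks whole-register equality against a scalar and against another register.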
struct CheckBoolEquals
{
template <typename type>
static void run (UnitTest& u, Random& random)
{
bool is_signed = std::is_signed<type>::value;
type array [SIMDRegister<type>::SIMDNumElements];
auto value = is_signed ? static_cast<type> ((random.nextFloat() * 16.0) - 8.0)
: static_cast<type> (random.nextFloat() * 8.0);
std::fill (array, array + SIMDRegister<type>::SIMDNumElements, value);
SIMDRegister<type> a, b;
copy (a, array);
u.expect (a == value);
u.expect (! (a != value));
value += 1;
u.expect (a != value);
u.expect (! (a == value));
SIMDRegister_test_internal::VecFiller<type>::fill (array, SIMDRegister<type>::SIMDNumElements, random);
copy (a, array);
copy (b, array);
u.expect (a == b);
u.expect (! (a != b));
SIMDRegister_test_internal::VecFiller<type>::fill (array, SIMDRegister<type>::SIMDNumElements, random);
copy (b, array);
u.expect (a != b);
u.expect (! (a == b));
}
};
//==============================================================================
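// Instantiates a test for every element type supported by SIMDRegister; the
// non-complex variant omits the std::complex specialisations.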
template <class TheTest>
void runTestForAllTypes (const char* unitTestName)
{
beginTest (unitTestName);
Random random = getRandom();
TheTest::template run<float> (*this, random);
TheTest::template run<double> (*this, random);
TheTest::template run<int8_t> (*this, random);
TheTest::template run<uint8_t> (*this, random);
TheTest::template run<int16_t> (*this, random);
TheTest::template run<uint16_t>(*this, random);
TheTest::template run<int32_t> (*this, random);
TheTest::template run<uint32_t>(*this, random);
TheTest::template run<int64_t> (*this, random);
TheTest::template run<uint64_t>(*this, random);
TheTest::template run<std::complex<float>> (*this, random);
TheTest::template run<std::complex<double>> (*this, random);
}
template <class TheTest>
void runTestNonComplex (const char* unitTestName)
{
beginTest (unitTestName);
Random random = getRandom();
TheTest::template run<float> (*this, random);
TheTest::template run<double> (*this, random);
TheTest::template run<int8_t> (*this, random);
TheTest::template run<uint8_t> (*this, random);
TheTest::template run<int16_t> (*this, random);
TheTest::template run<uint16_t>(*this, random);
TheTest::template run<int32_t> (*this, random);
TheTest::template run<uint32_t>(*this, random);
TheTest::template run<int64_t> (*this, random);
TheTest::template run<uint64_t>(*this, random);
}
void runTest()
{
runTestForAllTypes<InitializationTest> ("InitializationTest");
runTestForAllTypes<AccessTest> ("AccessTest");
runTestForAllTypes<OperatorTests<Addition>> ("AdditionOperators");
runTestForAllTypes<OperatorTests<Subtraction>> ("SubtractionOperators");
runTestForAllTypes<OperatorTests<Multiplication>> ("MultiplicationOperators");
runTestForAllTypes<BitOperatorTests<BitAND>> ("BitANDOperators");
runTestForAllTypes<BitOperatorTests<BitOR>> ("BitOROperators");
runTestForAllTypes<BitOperatorTests<BitXOR>> ("BitXOROperators");
runTestNonComplex<CheckComparisonOps> ("CheckComparisons");
runTestNonComplex<CheckBoolEquals> ("CheckBoolEquals");
runTestNonComplex<CheckMinMax> ("CheckMinMax");
runTestForAllTypes<CheckMultiplyAdd> ("CheckMultiplyAdd");
runTestForAllTypes<CheckSum> ("CheckSum");
}
};
static SIMDRegisterUnitTests SIMDRegisterUnitTests;
} // namespace dsp
} // namespace juce