summaryrefslogtreecommitdiffstats
path: root/src/corelib/global/qnumeric_p.h
diff options
context:
space:
mode:
authorUlf Hermann <ulf.hermann@qt.io>2020-12-01 11:54:43 +0100
committerUlf Hermann <ulf.hermann@qt.io>2020-12-07 15:37:25 +0100
commit2b8674a2fc67089ff87d3751ef2977cbb37616a2 (patch)
treec767cf155ab1f8acee28c3aeb40ea82701afe65d /src/corelib/global/qnumeric_p.h
parent9082cc8e8d5a6441dabe5e7a95bc0cd9085b95fe (diff)
Make the overflow math functions public
As the standard library does not provide equivalent functionality, those functions are really useful to everyone, not only to Qt itself. [ChangeLog][QtCore] The overflow-safe math functions qAddOverflow(), qSubOverflow(), qMulOverflow() were added. Change-Id: I5a0a4742dae9b6f0caa65d0e93edcf3658bee9f8 Reviewed-by: Fabian Kosmale <fabian.kosmale@qt.io> Reviewed-by: Lars Knoll <lars.knoll@qt.io>
Diffstat (limited to 'src/corelib/global/qnumeric_p.h')
-rw-r--r--src/corelib/global/qnumeric_p.h245
1 file changed, 10 insertions, 235 deletions
diff --git a/src/corelib/global/qnumeric_p.h b/src/corelib/global/qnumeric_p.h
index a11057dfff..823a1812de 100644
--- a/src/corelib/global/qnumeric_p.h
+++ b/src/corelib/global/qnumeric_p.h
@@ -53,29 +53,11 @@
//
#include "QtCore/private/qglobal_p.h"
+#include "QtCore/qnumeric.h"
#include <cmath>
#include <limits>
#include <type_traits>
-#if defined(Q_CC_MSVC)
-# include <intrin.h>
-# include <float.h>
-# if defined(Q_PROCESSOR_X86_64) || defined(Q_PROCESSOR_ARM_64)
-# define Q_INTRINSIC_MUL_OVERFLOW64
-# define Q_UMULH(v1, v2) __umulh(v1, v2);
-# define Q_SMULH(v1, v2) __mulh(v1, v2);
-# pragma intrinsic(__umulh)
-# pragma intrinsic(__mulh)
-# endif
-#endif
-
-# if defined(Q_OS_INTEGRITY) && defined(Q_PROCESSOR_ARM_64)
-#include <arm64_ghs.h>
-# define Q_INTRINSIC_MUL_OVERFLOW64
-# define Q_UMULH(v1, v2) __MULUH64(v1, v2);
-# define Q_SMULH(v1, v2) __MULSH64(v1, v2);
-#endif
-
#if !defined(Q_CC_MSVC) && (defined(Q_OS_QNX) || defined(Q_CC_INTEL))
# include <math.h>
# ifdef isnan
@@ -249,245 +231,38 @@ QT_WARNING_DISABLE_FLOAT_COMPARE
QT_WARNING_POP
}
-// Overflow math.
-// This provides efficient implementations for int, unsigned, qsizetype and
-// size_t. Implementations for 8- and 16-bit types will work but may not be as
-// efficient. Implementations for 64-bit may be missing on 32-bit platforms.
-
-#if ((defined(Q_CC_INTEL) ? (Q_CC_INTEL >= 1800 && !defined(Q_OS_WIN)) : defined(Q_CC_GNU)) \
- && Q_CC_GNU >= 500) || __has_builtin(__builtin_add_overflow)
-// GCC 5, ICC 18, and Clang 3.8 have builtins to detect overflows
-#define Q_INTRINSIC_MUL_OVERFLOW64
-
-template <typename T> inline
-typename std::enable_if<std::is_unsigned<T>::value || std::is_signed<T>::value, bool>::type
-add_overflow(T v1, T v2, T *r)
-{ return __builtin_add_overflow(v1, v2, r); }
-
-template <typename T> inline
-typename std::enable_if<std::is_unsigned<T>::value || std::is_signed<T>::value, bool>::type
-sub_overflow(T v1, T v2, T *r)
-{ return __builtin_sub_overflow(v1, v2, r); }
-
-template <typename T> inline
-typename std::enable_if<std::is_unsigned<T>::value || std::is_signed<T>::value, bool>::type
-mul_overflow(T v1, T v2, T *r)
-{ return __builtin_mul_overflow(v1, v2, r); }
-
-#else
-// Generic implementations
-
-template <typename T> inline typename std::enable_if<std::is_unsigned<T>::value, bool>::type
-add_overflow(T v1, T v2, T *r)
-{
- // unsigned additions are well-defined
- *r = v1 + v2;
- return v1 > T(v1 + v2);
-}
-
-template <typename T> inline typename std::enable_if<std::is_signed<T>::value, bool>::type
-add_overflow(T v1, T v2, T *r)
-{
- // Here's how we calculate the overflow:
- // 1) unsigned addition is well-defined, so we can always execute it
- // 2) conversion from unsigned back to signed is implementation-
- // defined and in the implementations we use, it's a no-op.
- // 3) signed integer overflow happens if the sign of the two input operands
- // is the same but the sign of the result is different. In other words,
- // the sign of the result must be the same as the sign of either
- // operand.
-
- using U = typename std::make_unsigned<T>::type;
- *r = T(U(v1) + U(v2));
-
- // If int is two's complement, assume all integer types are too.
- if (std::is_same<int32_t, int>::value) {
- // Two's complement equivalent (generates slightly shorter code):
- // x ^ y is negative if x and y have different signs
- // x & y is negative if x and y are negative
- // (x ^ z) & (y ^ z) is negative if x and z have different signs
- // AND y and z have different signs
- return ((v1 ^ *r) & (v2 ^ *r)) < 0;
- }
-
- bool s1 = (v1 < 0);
- bool s2 = (v2 < 0);
- bool sr = (*r < 0);
- return s1 != sr && s2 != sr;
- // also: return s1 == s2 && s1 != sr;
-}
-
-template <typename T> inline typename std::enable_if<std::is_unsigned<T>::value, bool>::type
-sub_overflow(T v1, T v2, T *r)
-{
- // unsigned subtractions are well-defined
- *r = v1 - v2;
- return v1 < v2;
-}
-
-template <typename T> inline typename std::enable_if<std::is_signed<T>::value, bool>::type
-sub_overflow(T v1, T v2, T *r)
-{
- // See above for explanation. This is the same with some signs reversed.
- // We can't use add_overflow(v1, -v2, r) because it would be UB if
- // v2 == std::numeric_limits<T>::min().
-
- using U = typename std::make_unsigned<T>::type;
- *r = T(U(v1) - U(v2));
-
- if (std::is_same<int32_t, int>::value)
- return ((v1 ^ *r) & (~v2 ^ *r)) < 0;
-
- bool s1 = (v1 < 0);
- bool s2 = !(v2 < 0);
- bool sr = (*r < 0);
- return s1 != sr && s2 != sr;
- // also: return s1 == s2 && s1 != sr;
-}
-
-template <typename T> inline
-typename std::enable_if<std::is_unsigned<T>::value || std::is_signed<T>::value, bool>::type
-mul_overflow(T v1, T v2, T *r)
-{
- // use the next biggest type
- // Note: for 64-bit systems where __int128 isn't supported, this will cause an error.
- using LargerInt = QIntegerForSize<sizeof(T) * 2>;
- using Larger = typename std::conditional<std::is_signed<T>::value,
- typename LargerInt::Signed, typename LargerInt::Unsigned>::type;
- Larger lr = Larger(v1) * Larger(v2);
- *r = T(lr);
- return lr > std::numeric_limits<T>::max() || lr < std::numeric_limits<T>::min();
-}
-
-# if defined(Q_INTRINSIC_MUL_OVERFLOW64)
-template <> inline bool mul_overflow(quint64 v1, quint64 v2, quint64 *r)
-{
- *r = v1 * v2;
- return Q_UMULH(v1, v2);
-}
-template <> inline bool mul_overflow(qint64 v1, qint64 v2, qint64 *r)
-{
- // This is slightly more complex than the unsigned case above: the sign bit
- // of 'low' must be replicated as the entire 'high', so the only valid
- // values for 'high' are 0 and -1. Use unsigned multiply since it's the same
- // as signed for the low bits and use a signed right shift to verify that
- // 'high' is nothing but sign bits that match the sign of 'low'.
-
- qint64 high = Q_SMULH(v1, v2);
- *r = qint64(quint64(v1) * quint64(v2));
- return (*r >> 63) != high;
-}
-
-# if defined(Q_OS_INTEGRITY) && defined(Q_PROCESSOR_ARM_64)
-template <> inline bool mul_overflow(uint64_t v1, uint64_t v2, uint64_t *r)
-{
- return mul_overflow<quint64>(v1,v2,reinterpret_cast<quint64*>(r));
-}
-
-template <> inline bool mul_overflow(int64_t v1, int64_t v2, int64_t *r)
-{
- return mul_overflow<qint64>(v1,v2,reinterpret_cast<qint64*>(r));
-}
-# endif // OS_INTEGRITY ARM64
-# endif // Q_INTRINSIC_MUL_OVERFLOW64
-
-# if defined(Q_CC_MSVC) && defined(Q_PROCESSOR_X86)
-// We can use intrinsics for the unsigned operations with MSVC
-template <> inline bool add_overflow(unsigned v1, unsigned v2, unsigned *r)
-{ return _addcarry_u32(0, v1, v2, r); }
-
-// 32-bit mul_overflow is fine with the generic code above
-
-template <> inline bool add_overflow(quint64 v1, quint64 v2, quint64 *r)
-{
-# if defined(Q_PROCESSOR_X86_64)
- return _addcarry_u64(0, v1, v2, reinterpret_cast<unsigned __int64 *>(r));
-# else
- uint low, high;
- uchar carry = _addcarry_u32(0, unsigned(v1), unsigned(v2), &low);
- carry = _addcarry_u32(carry, v1 >> 32, v2 >> 32, &high);
- *r = (quint64(high) << 32) | low;
- return carry;
-# endif // !x86-64
-}
-# endif // MSVC X86
-#endif // !GCC
-
-// Implementations for addition, subtraction or multiplication by a
-// compile-time constant. For addition and subtraction, we simply call the code
-// that detects overflow at runtime. For multiplication, we compare to the
-// maximum possible values before multiplying to ensure no overflow happens.
+template <typename T> inline bool add_overflow(T v1, T v2, T *r) { return qAddOverflow(v1, v2, r); }
+template <typename T> inline bool sub_overflow(T v1, T v2, T *r) { return qSubOverflow(v1, v2, r); }
+template <typename T> inline bool mul_overflow(T v1, T v2, T *r) { return qMulOverflow(v1, v2, r); }
template <typename T, T V2> bool add_overflow(T v1, std::integral_constant<T, V2>, T *r)
{
- return add_overflow(v1, V2, r);
+ return qAddOverflow<T, V2>(v1, std::integral_constant<T, V2>{}, r);
}
template <auto V2, typename T> bool add_overflow(T v1, T *r)
{
- return add_overflow(v1, std::integral_constant<T, V2>{}, r);
+ return qAddOverflow<V2, T>(v1, r);
}
template <typename T, T V2> bool sub_overflow(T v1, std::integral_constant<T, V2>, T *r)
{
- return sub_overflow(v1, V2, r);
+ return qSubOverflow<T, V2>(v1, std::integral_constant<T, V2>{}, r);
}
template <auto V2, typename T> bool sub_overflow(T v1, T *r)
{
- return sub_overflow(v1, std::integral_constant<T, V2>{}, r);
+ return qSubOverflow<V2, T>(v1, r);
}
template <typename T, T V2> bool mul_overflow(T v1, std::integral_constant<T, V2>, T *r)
{
- // Runtime detection for anything smaller than or equal to a register
- // width, as most architectures' multiplication instructions actually
- // produce a result twice as wide as the input registers, allowing us to
- // efficiently detect the overflow.
- if constexpr (sizeof(T) <= sizeof(qregisteruint)) {
- return mul_overflow(v1, V2, r);
-
-#ifdef Q_INTRINSIC_MUL_OVERFLOW64
- } else if constexpr (sizeof(T) <= sizeof(quint64)) {
- // If we have intrinsics detecting overflow of 64-bit multiplications,
- // then detect overflows through them up to 64 bits.
- return mul_overflow(v1, V2, r);
-#endif
-
- } else if constexpr (V2 == 0 || V2 == 1) {
- // trivial cases (and simplify logic below due to division by zero)
- *r = v1 * V2;
- return false;
- } else if constexpr (V2 == -1) {
- // multiplication by -1 is valid *except* for signed minimum values
- // (necessary to avoid diving min() by -1, which is an overflow)
- if (v1 < 0 && v1 == std::numeric_limits<T>::min())
- return true;
- *r = -v1;
- return false;
- } else {
- // For 64-bit multiplications on 32-bit platforms, let's instead compare v1
- // against the bounds that would overflow.
- constexpr T Highest = std::numeric_limits<T>::max() / V2;
- constexpr T Lowest = std::numeric_limits<T>::min() / V2;
- if constexpr (Highest > Lowest) {
- if (v1 > Highest || v1 < Lowest)
- return true;
- } else {
- // this can only happen if V2 < 0
- static_assert(V2 < 0);
- if (v1 > Lowest || v1 < Highest)
- return true;
- }
-
- *r = v1 * V2;
- return false;
- }
+ return qMulOverflow<T, V2>(v1, std::integral_constant<T, V2>{}, r);
}
template <auto V2, typename T> bool mul_overflow(T v1, T *r)
{
- return mul_overflow(v1, std::integral_constant<T, V2>{}, r);
+ return qMulOverflow<V2, T>(v1, r);
}
}
#endif // Q_CLANG_QDOC