From 2b8674a2fc67089ff87d3751ef2977cbb37616a2 Mon Sep 17 00:00:00 2001
From: Ulf Hermann <ulf.hermann@qt.io>
Date: Tue, 1 Dec 2020 11:54:43 +0100
Subject: Make the overflow math functions public

As the standard library does not provide equivalent functionality, those
functions are really useful to everyone, not only to Qt itself.

[ChangeLog][QtCore] The overflow-safe math functions qAddOverflow(),
qSubOverflow(), qMulOverflow() were added.

Change-Id: I5a0a4742dae9b6f0caa65d0e93edcf3658bee9f8
Reviewed-by: Fabian Kosmale <fabian.kosmale@qt.io>
Reviewed-by: Lars Knoll <lars.knoll@qt.io>
---
 src/corelib/global/qnumeric.cpp |  94 ++++++++++++++
 src/corelib/global/qnumeric.h   | 270 ++++++++++++++++++++++++++++++++++++++++
 src/corelib/global/qnumeric_p.h | 245 ++----------------------------------
 3 files changed, 374 insertions(+), 235 deletions(-)

(limited to 'src/corelib')

diff --git a/src/corelib/global/qnumeric.cpp b/src/corelib/global/qnumeric.cpp
index e00f0af283..63055a0b61 100644
--- a/src/corelib/global/qnumeric.cpp
+++ b/src/corelib/global/qnumeric.cpp
@@ -254,5 +254,99 @@ Q_CORE_EXPORT quint64 qFloatDistance(double a, double b)
     return a > b ? d2i(a) - d2i(b) : d2i(b) - d2i(a);
 }
 
+/*!
+    \fn template <typename T> bool qAddOverflow(T v1, T v2, T *result)
+    \since 6.1
+
+    Adds two values \a v1 and \a v2, of a numeric type \c T and records the
+    value in \a result. If the addition overflows the valid range for type \c T,
+    returns \c true, otherwise returns \c false.
+
+    An implementation is guaranteed to be available for 8-, 16-, and 32-bit
+    integer types, as well as integer types of the size of a pointer. Overflow
+    math for other types, if available, is considered private API.
+*/
+
+/*!
+    \fn template <typename T, T V2> bool qAddOverflow(T v1, std::integral_constant<T, V2>, T *r)
+    \since 6.1
+    \internal
+
+    Equivalent to qAddOverflow(v1, v2, r) with \a v1 as first argument, the
+    compile time constant \c V2 as second argument, and \a r as third argument.
+*/
+
+/*!
+    \fn template <auto V2, typename T> bool qAddOverflow(T v1, T *r)
+    \since 6.1
+    \internal
+
+    Equivalent to qAddOverflow(v1, v2, r) with \a v1 as first argument, the
+    compile time constant \c V2 as second argument, and \a r as third argument.
+*/
+
+/*!
+    \fn template <typename T> bool qSubOverflow(T v1, T v2, T *result)
+    \since 6.1
+
+    Subtracts \a v2 from \a v1 and records the resulting value in \a result. If
+    the subtraction overflows the valid range for type \c T, returns \c true,
+    otherwise returns \c false.
+
+    An implementation is guaranteed to be available for 8-, 16-, and 32-bit
+    integer types, as well as integer types of the size of a pointer. Overflow
+    math for other types, if available, is considered private API.
+*/
+
+/*!
+    \fn template <typename T, T V2> bool qSubOverflow(T v1, std::integral_constant<T, V2>, T *r)
+    \since 6.1
+    \internal
+
+    Equivalent to qSubOverflow(v1, v2, r) with \a v1 as first argument, the
+    compile time constant \c V2 as second argument, and \a r as third argument.
+*/
+
+/*!
+    \fn template <auto V2, typename T> bool qSubOverflow(T v1, T *r)
+    \since 6.1
+    \internal
+
+    Equivalent to qSubOverflow(v1, v2, r) with \a v1 as first argument, the
+    compile time constant \c V2 as second argument, and \a r as third argument.
+*/
+
+/*!
+    \fn template <typename T> bool qMulOverflow(T v1, T v2, T *result)
+    \since 6.1
+
+    Multiplies \a v1 and \a v2, and records the resulting value in \a result. If
+    the multiplication overflows the valid range for type \c T, returns
+    \c true, otherwise returns \c false.
+
+    An implementation is guaranteed to be available for 8-, 16-, and 32-bit
+    integer types, as well as integer types of the size of a pointer. Overflow
+    math for other types, if available, is considered private API.
+*/
+
+/*!
+    \fn template <typename T, T V2> bool qMulOverflow(T v1, std::integral_constant<T, V2>, T *r)
+    \since 6.1
+    \internal
+
+    Equivalent to qMulOverflow(v1, v2, r) with \a v1 as first argument, the
+    compile time constant \c V2 as second argument, and \a r as third argument.
+    This can be faster than calling the version with only variable arguments.
+*/
+
+/*!
+    \fn template <auto V2, typename T> bool qMulOverflow(T v1, T *r)
+    \since 6.1
+    \internal
+
+    Equivalent to qMulOverflow(v1, v2, r) with \a v1 as first argument, the
+    compile time constant \c V2 as second argument, and \a r as third argument.
+    This can be faster than calling the version with only variable arguments.
+*/
 
 QT_END_NAMESPACE
diff --git a/src/corelib/global/qnumeric.h b/src/corelib/global/qnumeric.h
index 2771eea64f..28285248c2 100644
--- a/src/corelib/global/qnumeric.h
+++ b/src/corelib/global/qnumeric.h
@@ -41,6 +41,35 @@
 #define QNUMERIC_H
 
 #include <QtCore/qglobal.h>
+#include <cmath>
+#include <limits>
+#include <type_traits>
+
+// min() and max() may be #defined by windows.h if that is included before, but we need them
+// for std::numeric_limits below. You should not use the min() and max() macros, so we just #undef.
+#ifdef min
+# undef min
+# undef max
+#endif
+
+#if defined(Q_CC_MSVC)
+# include <intrin.h>
+# include <float.h>
+# if defined(Q_PROCESSOR_X86_64) || defined(Q_PROCESSOR_ARM_64)
+# define Q_INTRINSIC_MUL_OVERFLOW64
+# define Q_UMULH(v1, v2) __umulh(v1, v2);
+# define Q_SMULH(v1, v2) __mulh(v1, v2);
+# pragma intrinsic(__umulh)
+# pragma intrinsic(__mulh)
+# endif
+#endif
+
+# if defined(Q_OS_INTEGRITY) && defined(Q_PROCESSOR_ARM_64)
+# include <arm64_ghs.h>
+# define Q_INTRINSIC_MUL_OVERFLOW64
+# define Q_UMULH(v1, v2) __MULUH64(v1, v2);
+# define Q_SMULH(v1, v2) __MULSH64(v1, v2);
+#endif
 
 QT_BEGIN_NAMESPACE
 
@@ -67,6 +96,247 @@ Q_CORE_EXPORT quint64 qFloatDistance(double a, double b);
 #endif
 
 #define Q_QNAN (QT_PREPEND_NAMESPACE(qQNaN)())
+
+// Overflow math.
+// This provides efficient implementations for int, unsigned, qsizetype and
+// size_t. Implementations for 8- and 16-bit types will work but may not be as
+// efficient. Implementations for 64-bit may be missing on 32-bit platforms.
+
+#if ((defined(Q_CC_INTEL) ? (Q_CC_INTEL >= 1800 && !defined(Q_OS_WIN)) : defined(Q_CC_GNU)) \
+      && Q_CC_GNU >= 500) || __has_builtin(__builtin_add_overflow)
+// GCC 5, ICC 18, and Clang 3.8 have builtins to detect overflows
+#define Q_INTRINSIC_MUL_OVERFLOW64
+
+template <typename T> inline
+typename std::enable_if_t<std::is_unsigned_v<T> || std::is_signed_v<T>, bool>
+qAddOverflow(T v1, T v2, T *r)
+{ return __builtin_add_overflow(v1, v2, r); }
+
+template <typename T> inline
+typename std::enable_if_t<std::is_unsigned_v<T> || std::is_signed_v<T>, bool>
+qSubOverflow(T v1, T v2, T *r)
+{ return __builtin_sub_overflow(v1, v2, r); }
+
+template <typename T> inline
+typename std::enable_if_t<std::is_unsigned_v<T> || std::is_signed_v<T>, bool>
+qMulOverflow(T v1, T v2, T *r)
+{ return __builtin_mul_overflow(v1, v2, r); }
+
+#else
+// Generic implementations
+
+template <typename T> inline typename std::enable_if_t<std::is_unsigned_v<T>, bool>
+qAddOverflow(T v1, T v2, T *r)
+{
+    // unsigned additions are well-defined
+    *r = v1 + v2;
+    return v1 > T(v1 + v2);
+}
+
+template <typename T> inline typename std::enable_if_t<std::is_signed_v<T>, bool>
+qAddOverflow(T v1, T v2, T *r)
+{
+    // Here's how we calculate the overflow:
+    // 1) unsigned addition is well-defined, so we can always execute it
+    // 2) conversion from unsigned back to signed is implementation-
+    //    defined and in the implementations we use, it's a no-op.
+    // 3) signed integer overflow happens if the sign of the two input operands
+    //    is the same but the sign of the result is different. In other words,
+    //    the sign of the result must be the same as the sign of either
+    //    operand.
+
+    using U = typename std::make_unsigned_t<T>;
+    *r = T(U(v1) + U(v2));
+
+    // If int is two's complement, assume all integer types are too.
+    if (std::is_same_v<int32_t, int>) {
+        // Two's complement equivalent (generates slightly shorter code):
+        //  x ^ y             is negative if x and y have different signs
+        //  x & y             is negative if x and y are negative
+        // (x ^ z) & (y ^ z)  is negative if x and z have different signs
+        //                    AND y and z have different signs
+        return ((v1 ^ *r) & (v2 ^ *r)) < 0;
+    }
+
+    bool s1 = (v1 < 0);
+    bool s2 = (v2 < 0);
+    bool sr = (*r < 0);
+    return s1 != sr && s2 != sr;
+    // also: return s1 == s2 && s1 != sr;
+}
+
+template <typename T> inline typename std::enable_if_t<std::is_unsigned_v<T>, bool>
+qSubOverflow(T v1, T v2, T *r)
+{
+    // unsigned subtractions are well-defined
+    *r = v1 - v2;
+    return v1 < v2;
+}
+
+template <typename T> inline typename std::enable_if_t<std::is_signed_v<T>, bool>
+qSubOverflow(T v1, T v2, T *r)
+{
+    // See above for explanation. This is the same with some signs reversed.
+    // We can't use qAddOverflow(v1, -v2, r) because it would be UB if
+    // v2 == std::numeric_limits<T>::min().
+
+    using U = typename std::make_unsigned_t<T>;
+    *r = T(U(v1) - U(v2));
+
+    if (std::is_same_v<int32_t, int>)
+        return ((v1 ^ *r) & (~v2 ^ *r)) < 0;
+
+    bool s1 = (v1 < 0);
+    bool s2 = !(v2 < 0);
+    bool sr = (*r < 0);
+    return s1 != sr && s2 != sr;
+    // also: return s1 == s2 && s1 != sr;
+}
+
+template <typename T> inline
+typename std::enable_if_t<std::is_unsigned_v<T> || std::is_signed_v<T>, bool>
+qMulOverflow(T v1, T v2, T *r)
+{
+    // use the next biggest type
+    // Note: for 64-bit systems where __int128 isn't supported, this will cause an error.
+    using LargerInt = QIntegerForSize<sizeof(T) * 2>;
+    using Larger = typename std::conditional_t<std::is_signed_v<T>,
+            typename LargerInt::Signed, typename LargerInt::Unsigned>;
+    Larger lr = Larger(v1) * Larger(v2);
+    *r = T(lr);
+    return lr > std::numeric_limits<T>::max() || lr < std::numeric_limits<T>::min();
+}
+
+# if defined(Q_INTRINSIC_MUL_OVERFLOW64)
+template <> inline bool qMulOverflow(quint64 v1, quint64 v2, quint64 *r)
+{
+    *r = v1 * v2;
+    return Q_UMULH(v1, v2);
+}
+template <> inline bool qMulOverflow(qint64 v1, qint64 v2, qint64 *r)
+{
+    // This is slightly more complex than the unsigned case above: the sign bit
+    // of 'low' must be replicated as the entire 'high', so the only valid
+    // values for 'high' are 0 and -1. Use unsigned multiply since it's the same
+    // as signed for the low bits and use a signed right shift to verify that
+    // 'high' is nothing but sign bits that match the sign of 'low'.
+
+    qint64 high = Q_SMULH(v1, v2);
+    *r = qint64(quint64(v1) * quint64(v2));
+    return (*r >> 63) != high;
+}
+
+# if defined(Q_OS_INTEGRITY) && defined(Q_PROCESSOR_ARM_64)
+template <> inline bool qMulOverflow(uint64_t v1, uint64_t v2, uint64_t *r)
+{
+    return qMulOverflow<quint64>(v1,v2,reinterpret_cast<quint64 *>(r));
+}
+
+template <> inline bool qMulOverflow(int64_t v1, int64_t v2, int64_t *r)
+{
+    return qMulOverflow<qint64>(v1,v2,reinterpret_cast<qint64 *>(r));
+}
+# endif // OS_INTEGRITY ARM64
+# endif // Q_INTRINSIC_MUL_OVERFLOW64
+
+# if defined(Q_CC_MSVC) && defined(Q_PROCESSOR_X86)
+// We can use intrinsics for the unsigned operations with MSVC
+template <> inline bool qAddOverflow(unsigned v1, unsigned v2, unsigned *r)
+{ return _addcarry_u32(0, v1, v2, r); }
+
+// 32-bit qMulOverflow is fine with the generic code above
+
+template <> inline bool qAddOverflow(quint64 v1, quint64 v2, quint64 *r)
+{
+# if defined(Q_PROCESSOR_X86_64)
+    return _addcarry_u64(0, v1, v2, reinterpret_cast<unsigned __int64 *>(r));
+# else
+    uint low, high;
+    uchar carry = _addcarry_u32(0, unsigned(v1), unsigned(v2), &low);
+    carry = _addcarry_u32(carry, v1 >> 32, v2 >> 32, &high);
+    *r = (quint64(high) << 32) | low;
+    return carry;
+# endif // !x86-64
+}
+# endif // MSVC X86
+#endif // !GCC
+
+// Implementations for addition, subtraction or multiplication by a
+// compile-time constant. For addition and subtraction, we simply call the code
+// that detects overflow at runtime. For multiplication, we compare to the
+// maximum possible values before multiplying to ensure no overflow happens.
+
+template <typename T, T V2> bool qAddOverflow(T v1, std::integral_constant<T, V2>, T *r)
+{
+    return qAddOverflow(v1, V2, r);
+}
+
+template <auto V2, typename T> bool qAddOverflow(T v1, T *r)
+{
+    return qAddOverflow(v1, std::integral_constant<T, V2>{}, r);
+}
+
+template <typename T, T V2> bool qSubOverflow(T v1, std::integral_constant<T, V2>, T *r)
+{
+    return qSubOverflow(v1, V2, r);
+}
+
+template <auto V2, typename T> bool qSubOverflow(T v1, T *r)
+{
+    return qSubOverflow(v1, std::integral_constant<T, V2>{}, r);
+}
+
+template <typename T, T V2> bool qMulOverflow(T v1, std::integral_constant<T, V2>, T *r)
+{
+    // Runtime detection for anything smaller than or equal to a register
+    // width, as most architectures' multiplication instructions actually
+    // produce a result twice as wide as the input registers, allowing us to
+    // efficiently detect the overflow.
+    if constexpr (sizeof(T) <= sizeof(qregisteruint)) {
+        return qMulOverflow(v1, V2, r);
+
+#ifdef Q_INTRINSIC_MUL_OVERFLOW64
+    } else if constexpr (sizeof(T) <= sizeof(quint64)) {
+        // If we have intrinsics detecting overflow of 64-bit multiplications,
+        // then detect overflows through them up to 64 bits.
+        return qMulOverflow(v1, V2, r);
+#endif
+
+    } else if constexpr (V2 == 0 || V2 == 1) {
+        // trivial cases (and simplify logic below due to division by zero)
+        *r = v1 * V2;
+        return false;
+    } else if constexpr (V2 == -1) {
+        // multiplication by -1 is valid *except* for signed minimum values
+        // (necessary to avoid dividing min() by -1, which is an overflow)
+        if (v1 < 0 && v1 == std::numeric_limits<T>::min())
+            return true;
+        *r = -v1;
+        return false;
+    } else {
+        // For 64-bit multiplications on 32-bit platforms, let's instead compare v1
+        // against the bounds that would overflow.
+        constexpr T Highest = std::numeric_limits<T>::max() / V2;
+        constexpr T Lowest = std::numeric_limits<T>::min() / V2;
+        if constexpr (Highest > Lowest) {
+            if (v1 > Highest || v1 < Lowest)
+                return true;
+        } else {
+            // this can only happen if V2 < 0
+            static_assert(V2 < 0);
+            if (v1 > Lowest || v1 < Highest)
+                return true;
+        }
+
+        *r = v1 * V2;
+        return false;
+    }
+}
+
+template <auto V2, typename T> bool qMulOverflow(T v1, T *r)
+{
+    return qMulOverflow(v1, std::integral_constant<T, V2>{}, r);
+}
+
 QT_END_NAMESPACE
 
 #endif // QNUMERIC_H
diff --git a/src/corelib/global/qnumeric_p.h b/src/corelib/global/qnumeric_p.h
index a11057dfff..823a1812de 100644
--- a/src/corelib/global/qnumeric_p.h
+++ b/src/corelib/global/qnumeric_p.h
@@ -53,29 +53,11 @@
 //
 
 #include "QtCore/private/qglobal_p.h"
+#include "QtCore/qnumeric.h"
 #include <cmath>
 #include <limits>
 #include <cstdlib>
 
-#if defined(Q_CC_MSVC)
-# include <intrin.h>
-# include <float.h>
-# if defined(Q_PROCESSOR_X86_64) || defined(Q_PROCESSOR_ARM_64)
-# define Q_INTRINSIC_MUL_OVERFLOW64
-# define Q_UMULH(v1, v2) __umulh(v1, v2);
-# define Q_SMULH(v1, v2) __mulh(v1, v2);
-# pragma intrinsic(__umulh)
-# pragma intrinsic(__mulh)
-# endif
-#endif
-
-# if defined(Q_OS_INTEGRITY) && defined(Q_PROCESSOR_ARM_64)
-#include <arm64_ghs.h>
-# define Q_INTRINSIC_MUL_OVERFLOW64
-# define Q_UMULH(v1, v2) __MULUH64(v1, v2);
-# define Q_SMULH(v1, v2) __MULSH64(v1, v2);
-#endif
-
 #if !defined(Q_CC_MSVC) && (defined(Q_OS_QNX) || defined(Q_CC_INTEL))
 # include <math.h>
 # ifdef isnan
@@ -249,245 +231,38 @@ QT_WARNING_DISABLE_FLOAT_COMPARE
 QT_WARNING_POP
 }
 
-// Overflow math.
-// This provides efficient implementations for int, unsigned, qsizetype and
-// size_t. Implementations for 8- and 16-bit types will work but may not be as
-// efficient. Implementations for 64-bit may be missing on 32-bit platforms.
-
-#if ((defined(Q_CC_INTEL) ? (Q_CC_INTEL >= 1800 && !defined(Q_OS_WIN)) : defined(Q_CC_GNU)) \
-      && Q_CC_GNU >= 500) || __has_builtin(__builtin_add_overflow)
-// GCC 5, ICC 18, and Clang 3.8 have builtins to detect overflows
-#define Q_INTRINSIC_MUL_OVERFLOW64
-
-template <typename T> inline
-typename std::enable_if<std::is_unsigned<T>::value || std::is_signed<T>::value, bool>::type
-add_overflow(T v1, T v2, T *r)
-{ return __builtin_add_overflow(v1, v2, r); }
-
-template <typename T> inline
-typename std::enable_if<std::is_unsigned<T>::value || std::is_signed<T>::value, bool>::type
-sub_overflow(T v1, T v2, T *r)
-{ return __builtin_sub_overflow(v1, v2, r); }
-
-template <typename T> inline
-typename std::enable_if<std::is_unsigned<T>::value || std::is_signed<T>::value, bool>::type
-mul_overflow(T v1, T v2, T *r)
-{ return __builtin_mul_overflow(v1, v2, r); }
-
-#else
-// Generic implementations
-
-template <typename T> inline typename std::enable_if<std::is_unsigned<T>::value, bool>::type
-add_overflow(T v1, T v2, T *r)
-{
-    // unsigned additions are well-defined
-    *r = v1 + v2;
-    return v1 > T(v1 + v2);
-}
-
-template <typename T> inline typename std::enable_if<std::is_signed<T>::value, bool>::type
-add_overflow(T v1, T v2, T *r)
-{
-    // Here's how we calculate the overflow:
-    // 1) unsigned addition is well-defined, so we can always execute it
-    // 2) conversion from unsigned back to signed is implementation-
-    //    defined and in the implementations we use, it's a no-op.
-    // 3) signed integer overflow happens if the sign of the two input operands
-    //    is the same but the sign of the result is different. In other words,
-    //    the sign of the result must be the same as the sign of either
-    //    operand.
-
-    using U = typename std::make_unsigned<T>::type;
-    *r = T(U(v1) + U(v2));
-
-    // If int is two's complement, assume all integer types are too.
-    if (std::is_same<int32_t, int>::value) {
-        // Two's complement equivalent (generates slightly shorter code):
-        //  x ^ y             is negative if x and y have different signs
-        //  x & y             is negative if x and y are negative
-        // (x ^ z) & (y ^ z)  is negative if x and z have different signs
-        //                    AND y and z have different signs
-        return ((v1 ^ *r) & (v2 ^ *r)) < 0;
-    }
-
-    bool s1 = (v1 < 0);
-    bool s2 = (v2 < 0);
-    bool sr = (*r < 0);
-    return s1 != sr && s2 != sr;
-    // also: return s1 == s2 && s1 != sr;
-}
-
-template <typename T> inline typename std::enable_if<std::is_unsigned<T>::value, bool>::type
-sub_overflow(T v1, T v2, T *r)
-{
-    // unsigned subtractions are well-defined
-    *r = v1 - v2;
-    return v1 < v2;
-}
-
-template <typename T> inline typename std::enable_if<std::is_signed<T>::value, bool>::type
-sub_overflow(T v1, T v2, T *r)
-{
-    // See above for explanation. This is the same with some signs reversed.
-    // We can't use add_overflow(v1, -v2, r) because it would be UB if
-    // v2 == std::numeric_limits<T>::min().
-
-    using U = typename std::make_unsigned<T>::type;
-    *r = T(U(v1) - U(v2));
-
-    if (std::is_same<int32_t, int>::value)
-        return ((v1 ^ *r) & (~v2 ^ *r)) < 0;
-
-    bool s1 = (v1 < 0);
-    bool s2 = !(v2 < 0);
-    bool sr = (*r < 0);
-    return s1 != sr && s2 != sr;
-    // also: return s1 == s2 && s1 != sr;
-}
-
-template <typename T> inline
-typename std::enable_if<std::is_unsigned<T>::value || std::is_signed<T>::value, bool>::type
-mul_overflow(T v1, T v2, T *r)
-{
-    // use the next biggest type
-    // Note: for 64-bit systems where __int128 isn't supported, this will cause an error.
-    using LargerInt = QIntegerForSize<sizeof(T) * 2>;
-    using Larger = typename std::conditional<std::is_signed<T>::value,
-            typename LargerInt::Signed, typename LargerInt::Unsigned>::type;
-    Larger lr = Larger(v1) * Larger(v2);
-    *r = T(lr);
-    return lr > std::numeric_limits<T>::max() || lr < std::numeric_limits<T>::min();
-}
-
-# if defined(Q_INTRINSIC_MUL_OVERFLOW64)
-template <> inline bool mul_overflow(quint64 v1, quint64 v2, quint64 *r)
-{
-    *r = v1 * v2;
-    return Q_UMULH(v1, v2);
-}
-template <> inline bool mul_overflow(qint64 v1, qint64 v2, qint64 *r)
-{
-    // This is slightly more complex than the unsigned case above: the sign bit
-    // of 'low' must be replicated as the entire 'high', so the only valid
-    // values for 'high' are 0 and -1. Use unsigned multiply since it's the same
-    // as signed for the low bits and use a signed right shift to verify that
-    // 'high' is nothing but sign bits that match the sign of 'low'.
-
-    qint64 high = Q_SMULH(v1, v2);
-    *r = qint64(quint64(v1) * quint64(v2));
-    return (*r >> 63) != high;
-}
-
-# if defined(Q_OS_INTEGRITY) && defined(Q_PROCESSOR_ARM_64)
-template <> inline bool mul_overflow(uint64_t v1, uint64_t v2, uint64_t *r)
-{
-    return mul_overflow<quint64>(v1,v2,reinterpret_cast<quint64 *>(r));
-}
-
-template <> inline bool mul_overflow(int64_t v1, int64_t v2, int64_t *r)
-{
-    return mul_overflow<qint64>(v1,v2,reinterpret_cast<qint64 *>(r));
-}
-# endif // OS_INTEGRITY ARM64
-# endif // Q_INTRINSIC_MUL_OVERFLOW64
-
-# if defined(Q_CC_MSVC) && defined(Q_PROCESSOR_X86)
-// We can use intrinsics for the unsigned operations with MSVC
-template <> inline bool add_overflow(unsigned v1, unsigned v2, unsigned *r)
-{ return _addcarry_u32(0, v1, v2, r); }
-
-// 32-bit mul_overflow is fine with the generic code above
-
-template <> inline bool add_overflow(quint64 v1, quint64 v2, quint64 *r)
-{
-# if defined(Q_PROCESSOR_X86_64)
-    return _addcarry_u64(0, v1, v2, reinterpret_cast<unsigned __int64 *>(r));
-# else
-    uint low, high;
-    uchar carry = _addcarry_u32(0, unsigned(v1), unsigned(v2), &low);
-    carry = _addcarry_u32(carry, v1 >> 32, v2 >> 32, &high);
-    *r = (quint64(high) << 32) | low;
-    return carry;
-# endif // !x86-64
-}
-# endif // MSVC X86
-#endif // !GCC
-
-// Implementations for addition, subtraction or multiplication by a
-// compile-time constant. For addition and subtraction, we simply call the code
-// that detects overflow at runtime. For multiplication, we compare to the
-// maximum possible values before multiplying to ensure no overflow happens.
+template <typename T> inline bool add_overflow(T v1, T v2, T *r) { return qAddOverflow(v1, v2, r); }
+template <typename T> inline bool sub_overflow(T v1, T v2, T *r) { return qSubOverflow(v1, v2, r); }
+template <typename T> inline bool mul_overflow(T v1, T v2, T *r) { return qMulOverflow(v1, v2, r); }
 
 template <typename T, T V2> bool add_overflow(T v1, std::integral_constant<T, V2>, T *r)
 {
-    return add_overflow(v1, V2, r);
+    return qAddOverflow(v1, std::integral_constant<T, V2>{}, r);
 }
 
 template <auto V2, typename T> bool add_overflow(T v1, T *r)
 {
-    return add_overflow(v1, std::integral_constant<T, V2>{}, r);
+    return qAddOverflow<V2, T>(v1, r);
 }
 
 template <typename T, T V2> bool sub_overflow(T v1, std::integral_constant<T, V2>, T *r)
 {
-    return sub_overflow(v1, V2, r);
+    return qSubOverflow(v1, std::integral_constant<T, V2>{}, r);
 }
 
 template <auto V2, typename T> bool sub_overflow(T v1, T *r)
 {
-    return sub_overflow(v1, std::integral_constant<T, V2>{}, r);
+    return qSubOverflow<V2, T>(v1, r);
 }
 
 template <typename T, T V2> bool mul_overflow(T v1, std::integral_constant<T, V2>, T *r)
 {
-    // Runtime detection for anything smaller than or equal to a register
-    // width, as most architectures' multiplication instructions actually
-    // produce a result twice as wide as the input registers, allowing us to
-    // efficiently detect the overflow.
-    if constexpr (sizeof(T) <= sizeof(qregisteruint)) {
-        return mul_overflow(v1, V2, r);
-
-#ifdef Q_INTRINSIC_MUL_OVERFLOW64
-    } else if constexpr (sizeof(T) <= sizeof(quint64)) {
-        // If we have intrinsics detecting overflow of 64-bit multiplications,
-        // then detect overflows through them up to 64 bits.
-        return mul_overflow(v1, V2, r);
-#endif
-
-    } else if constexpr (V2 == 0 || V2 == 1) {
-        // trivial cases (and simplify logic below due to division by zero)
-        *r = v1 * V2;
-        return false;
-    } else if constexpr (V2 == -1) {
-        // multiplication by -1 is valid *except* for signed minimum values
-        // (necessary to avoid diving min() by -1, which is an overflow)
-        if (v1 < 0 && v1 == std::numeric_limits<T>::min())
-            return true;
-        *r = -v1;
-        return false;
-    } else {
-        // For 64-bit multiplications on 32-bit platforms, let's instead compare v1
-        // against the bounds that would overflow.
-        constexpr T Highest = std::numeric_limits<T>::max() / V2;
-        constexpr T Lowest = std::numeric_limits<T>::min() / V2;
-        if constexpr (Highest > Lowest) {
-            if (v1 > Highest || v1 < Lowest)
-                return true;
-        } else {
-            // this can only happen if V2 < 0
-            static_assert(V2 < 0);
-            if (v1 > Lowest || v1 < Highest)
-                return true;
-        }
-
-        *r = v1 * V2;
-        return false;
-    }
+    return qMulOverflow(v1, std::integral_constant<T, V2>{}, r);
 }
 
 template <auto V2, typename T> bool mul_overflow(T v1, T *r)
 {
-    return mul_overflow(v1, std::integral_constant<T, V2>{}, r);
+    return qMulOverflow<V2, T>(v1, r);
 }
 
 }
 #endif // Q_CLANG_QDOC
-- 
cgit v1.2.3