Diffstat (limited to 'src/corelib/global/qnumeric_p.h')
-rw-r--r--  src/corelib/global/qnumeric_p.h  117
1 files changed, 117 insertions, 0 deletions
diff --git a/src/corelib/global/qnumeric_p.h b/src/corelib/global/qnumeric_p.h
index 5fa0a881a3..e5f9d8e13e 100644
--- a/src/corelib/global/qnumeric_p.h
+++ b/src/corelib/global/qnumeric_p.h
@@ -1,6 +1,7 @@
/****************************************************************************
**
** Copyright (C) 2015 The Qt Company Ltd.
+** Copyright (C) 2015 Intel Corporation.
** Contact: http://www.qt.io/licensing/
**
** This file is part of the QtCore module of the Qt Toolkit.
@@ -47,6 +48,18 @@
#include "QtCore/qglobal.h"
+#include <limits>
+
+#if defined(Q_OS_WIN) && !defined(Q_OS_WINCE)
+# include <intrin.h>
+#elif defined(Q_CC_INTEL)
+# include <immintrin.h> // for _addcarry_u<nn>
+#endif
+
+#ifndef __has_builtin
+# define __has_builtin(x) 0
+#endif
+
QT_BEGIN_NAMESPACE
#if !defined(Q_CC_MIPS)
@@ -188,6 +201,110 @@ static inline bool qt_is_finite(float d)
}
}
+//
+// Overflow math
+//
+namespace {
+template <typename T> inline typename QtPrivate::QEnableIf<QtPrivate::is_unsigned<T>::value, bool>::Type
+add_overflow(T v1, T v2, T *r)
+{
+ // unsigned additions are well-defined
+ *r = v1 + v2;
+ return v1 > T(v1 + v2);
+}
+
+template <typename T> inline typename QtPrivate::QEnableIf<QtPrivate::is_unsigned<T>::value, bool>::Type
+mul_overflow(T v1, T v2, T *r)
+{
+ // use the next biggest type
+ // Note: for 64-bit systems where __int128 isn't supported, this will cause an error.
+ // A fallback is present below.
+ typedef typename QIntegerForSize<sizeof(T) * 2>::Unsigned Larger;
+ Larger lr = Larger(v1) * Larger(v2);
+ *r = T(lr);
+ return lr > std::numeric_limits<T>::max();
+}
+
+#if defined(__SIZEOF_INT128__)
+# define HAVE_MUL64_OVERFLOW
+#endif
+
+// GCC 5 and Clang have builtins to detect overflows
+#if (defined(Q_CC_GNU) && !defined(Q_CC_INTEL) && Q_CC_GNU >= 500) || __has_builtin(__builtin_uadd_overflow)
+template <> inline bool add_overflow(unsigned v1, unsigned v2, unsigned *r)
+{ return __builtin_uadd_overflow(v1, v2, r); }
+#endif
+#if (defined(Q_CC_GNU) && !defined(Q_CC_INTEL) && Q_CC_GNU >= 500) || __has_builtin(__builtin_uaddl_overflow)
+template <> inline bool add_overflow(unsigned long v1, unsigned long v2, unsigned long *r)
+{ return __builtin_uaddl_overflow(v1, v2, r); }
+#endif
+#if (defined(Q_CC_GNU) && !defined(Q_CC_INTEL) && Q_CC_GNU >= 500) || __has_builtin(__builtin_uaddll_overflow)
+template <> inline bool add_overflow(unsigned long long v1, unsigned long long v2, unsigned long long *r)
+{ return __builtin_uaddll_overflow(v1, v2, r); }
+#endif
+
+#if (defined(Q_CC_GNU) && !defined(Q_CC_INTEL) && Q_CC_GNU >= 500) || __has_builtin(__builtin_umul_overflow)
+template <> inline bool mul_overflow(unsigned v1, unsigned v2, unsigned *r)
+{ return __builtin_umul_overflow(v1, v2, r); }
+#endif
+#if (defined(Q_CC_GNU) && !defined(Q_CC_INTEL) && Q_CC_GNU >= 500) || __has_builtin(__builtin_umull_overflow)
+template <> inline bool mul_overflow(unsigned long v1, unsigned long v2, unsigned long *r)
+{ return __builtin_umull_overflow(v1, v2, r); }
+#endif
+#if (defined(Q_CC_GNU) && !defined(Q_CC_INTEL) && Q_CC_GNU >= 500) || __has_builtin(__builtin_umulll_overflow)
+template <> inline bool mul_overflow(unsigned long long v1, unsigned long long v2, unsigned long long *r)
+{ return __builtin_umulll_overflow(v1, v2, r); }
+# define HAVE_MUL64_OVERFLOW
+#endif
+
+#if ((defined(Q_CC_MSVC) && _MSC_VER >= 1800) || defined(Q_CC_INTEL)) && defined(Q_PROCESSOR_X86)
+template <> inline bool add_overflow(unsigned v1, unsigned v2, unsigned *r)
+{ return _addcarry_u32(0, v1, v2, r); }
+# ifdef Q_CC_MSVC // longs are 32-bit
+template <> inline bool add_overflow(unsigned long v1, unsigned long v2, unsigned long *r)
+{ return _addcarry_u32(0, v1, v2, reinterpret_cast<unsigned *>(r)); }
+# endif
+#endif
+#if ((defined(Q_CC_MSVC) && _MSC_VER >= 1800) || defined(Q_CC_INTEL)) && defined(Q_PROCESSOR_X86_64)
+template <> inline bool add_overflow(quint64 v1, quint64 v2, quint64 *r)
+{ return _addcarry_u64(0, v1, v2, reinterpret_cast<unsigned __int64 *>(r)); }
+# ifndef Q_CC_MSVC // longs are 64-bit
+template <> inline bool add_overflow(unsigned long v1, unsigned long v2, unsigned long *r)
+{ return _addcarry_u64(0, v1, v2, reinterpret_cast<unsigned __int64 *>(r)); }
+# endif
+#endif
+
+#if defined(Q_CC_MSVC) && (defined(Q_PROCESSOR_X86_64) || defined(Q_PROCESSOR_IA64))
+#pragma intrinsic(_umul128)
+template <> inline bool mul_overflow(quint64 v1, quint64 v2, quint64 *r)
+{
+ // use 128-bit multiplication with the _umul128 intrinsic
+ // https://msdn.microsoft.com/en-us/library/3dayytw9.aspx
+ quint64 high;
+ *r = _umul128(v1, v2, &high);
+ return high;
+}
+# define HAVE_MUL64_OVERFLOW
+#endif
+
+#if !defined(HAVE_MUL64_OVERFLOW) && defined(__LP64__)
+// no 128-bit multiplication available, so detect overflow with a (slower) division check
+template <> inline bool mul_overflow(quint64 v1, quint64 v2, quint64 *r)
+{
+ if (v2 && v1 > std::numeric_limits<quint64>::max() / v2)
+ return true;
+ *r = v1 * v2;
+ return false;
+}
+template <> inline bool mul_overflow(unsigned long v1, unsigned long v2, unsigned long *r)
+{
+ return mul_overflow<quint64>(v1, v2, reinterpret_cast<quint64 *>(r));
+}
+#else
+# undef HAVE_MUL64_OVERFLOW
+#endif
+}
+
QT_END_NAMESPACE
#endif // QNUMERIC_P_H
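
Usage note (not part of the diff): the helpers added above follow a check-before-use pattern; each writes the (possibly wrapped) result through the pointer and returns true when the operation overflowed. Below is a minimal, self-contained sketch of that pattern using simplified stand-ins for the generic templates (std::enable_if instead of QtPrivate::QEnableIf, and none of the compiler-specific specializations); it is illustrative only, not the code from qnumeric_p.h.

#include <cstdio>
#include <limits>
#include <type_traits>

// Simplified stand-in for the generic add_overflow template in the diff.
template <typename T>
typename std::enable_if<std::is_unsigned<T>::value, bool>::type
add_overflow(T v1, T v2, T *r)
{
    *r = T(v1 + v2);          // unsigned wrap-around is well-defined
    return v1 > T(v1 + v2);   // the wrapped sum is smaller than an operand iff it overflowed
}

// Simplified stand-in for the 64-bit fallback: division-based overflow check
// (intended for full-width unsigned types, as in the diff's quint64 fallback).
template <typename T>
typename std::enable_if<std::is_unsigned<T>::value, bool>::type
mul_overflow(T v1, T v2, T *r)
{
    *r = T(v1 * v2);
    return v2 != 0 && v1 > std::numeric_limits<T>::max() / v2;
}

int main()
{
    unsigned r;
    if (add_overflow(4000000000u, 1000000000u, &r))
        std::printf("addition overflowed, wrapped result = %u\n", r);

    if (!mul_overflow(100000u, 40000u, &r))
        std::printf("100000 * 40000 = %u\n", r);
    return 0;
}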