summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--  src/corelib/arch/qatomic_arch.h    |   2
-rw-r--r--  src/corelib/arch/qatomic_ia64.h    | 966
-rw-r--r--  src/corelib/thread/qbasicatomic.h  |   2
3 files changed, 620 insertions, 350 deletions
diff --git a/src/corelib/arch/qatomic_arch.h b/src/corelib/arch/qatomic_arch.h
index 4b63375d6a..7e23438f69 100644
--- a/src/corelib/arch/qatomic_arch.h
+++ b/src/corelib/arch/qatomic_arch.h
@@ -58,8 +58,6 @@ QT_BEGIN_HEADER
# include "QtCore/qatomic_bfin.h"
#elif defined(QT_ARCH_GENERIC)
# include "QtCore/qatomic_generic.h"
-#elif defined(QT_ARCH_IA64)
-# include "QtCore/qatomic_ia64.h"
#elif defined(QT_ARCH_MACOSX)
# include "QtCore/qatomic_macosx.h"
#elif defined(QT_ARCH_PARISC)
diff --git a/src/corelib/arch/qatomic_ia64.h b/src/corelib/arch/qatomic_ia64.h
index 8e562e811a..2b7d73fd32 100644
--- a/src/corelib/arch/qatomic_ia64.h
+++ b/src/corelib/arch/qatomic_ia64.h
@@ -1,7 +1,7 @@
/****************************************************************************
**
** Copyright (C) 2012 Nokia Corporation and/or its subsidiary(-ies).
-** All rights reserved.
+** Copyright (C) 2011 Thiago Macieira <thiago@kde.org>
** Contact: http://www.qt-project.org/
**
** This file is part of the QtCore module of the Qt Toolkit.
@@ -42,69 +42,147 @@
#ifndef QATOMIC_IA64_H
#define QATOMIC_IA64_H
+#include <QtCore/qgenericatomic.h>
+
QT_BEGIN_HEADER
QT_BEGIN_NAMESPACE
+#if 0
+#pragma qt_sync_stop_processing
+#endif
+
#define Q_ATOMIC_INT_REFERENCE_COUNTING_IS_ALWAYS_NATIVE
#define Q_ATOMIC_INT_REFERENCE_COUNTING_IS_WAIT_FREE
-inline bool QBasicAtomicInt::isReferenceCountingNative()
-{ return true; }
-inline bool QBasicAtomicInt::isReferenceCountingWaitFree()
-{ return true; }
-
#define Q_ATOMIC_INT_TEST_AND_SET_IS_ALWAYS_NATIVE
#define Q_ATOMIC_INT_TEST_AND_SET_IS_WAIT_FREE
-inline bool QBasicAtomicInt::isTestAndSetNative()
-{ return true; }
-inline bool QBasicAtomicInt::isTestAndSetWaitFree()
-{ return true; }
-
#define Q_ATOMIC_INT_FETCH_AND_STORE_IS_ALWAYS_NATIVE
#define Q_ATOMIC_INT_FETCH_AND_STORE_IS_WAIT_FREE
-inline bool QBasicAtomicInt::isFetchAndStoreNative()
-{ return true; }
-inline bool QBasicAtomicInt::isFetchAndStoreWaitFree()
-{ return true; }
-
#define Q_ATOMIC_INT_FETCH_AND_ADD_IS_ALWAYS_NATIVE
-inline bool QBasicAtomicInt::isFetchAndAddNative()
-{ return true; }
-inline bool QBasicAtomicInt::isFetchAndAddWaitFree()
-{ return false; }
+#define Q_ATOMIC_INT32_IS_SUPPORTED
+
+#define Q_ATOMIC_INT32_REFERENCE_COUNTING_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT32_REFERENCE_COUNTING_IS_WAIT_FREE
+
+#define Q_ATOMIC_INT32_TEST_AND_SET_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT32_TEST_AND_SET_IS_WAIT_FREE
+
+#define Q_ATOMIC_INT32_FETCH_AND_STORE_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT32_FETCH_AND_STORE_IS_WAIT_FREE
+
+#define Q_ATOMIC_INT32_FETCH_AND_ADD_IS_ALWAYS_NATIVE
#define Q_ATOMIC_POINTER_TEST_AND_SET_IS_ALWAYS_NATIVE
#define Q_ATOMIC_POINTER_TEST_AND_SET_IS_WAIT_FREE
-template <typename T>
-Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetNative()
-{ return true; }
-template <typename T>
-Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetWaitFree()
-{ return true; }
-
#define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_ALWAYS_NATIVE
#define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_WAIT_FREE
-template <typename T>
-Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreNative()
-{ return true; }
-template <typename T>
-Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreWaitFree()
-{ return true; }
-
#define Q_ATOMIC_POINTER_FETCH_AND_ADD_IS_ALWAYS_NATIVE
-template <typename T>
-Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddNative()
-{ return true; }
-template <typename T>
-Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddWaitFree()
-{ return false; }
+#define Q_ATOMIC_INT8_IS_SUPPORTED
+
+#define Q_ATOMIC_INT8_REFERENCE_COUNTING_IS_ALWAYS_NATIVE
+
+#define Q_ATOMIC_INT8_TEST_AND_SET_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT8_TEST_AND_SET_IS_WAIT_FREE
+
+#define Q_ATOMIC_INT8_FETCH_AND_STORE_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT8_FETCH_AND_STORE_IS_WAIT_FREE
+
+#define Q_ATOMIC_INT8_FETCH_AND_ADD_IS_ALWAYS_NATIVE
+
+#define Q_ATOMIC_INT16_IS_SUPPORTED
+
+#define Q_ATOMIC_INT16_REFERENCE_COUNTING_IS_ALWAYS_NATIVE
+
+#define Q_ATOMIC_INT16_TEST_AND_SET_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT16_TEST_AND_SET_IS_WAIT_FREE
+
+#define Q_ATOMIC_INT16_FETCH_AND_STORE_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT16_FETCH_AND_STORE_IS_WAIT_FREE
+
+#define Q_ATOMIC_INT16_FETCH_AND_ADD_IS_ALWAYS_NATIVE
+
+#define Q_ATOMIC_INT64_IS_SUPPORTED
+
+#define Q_ATOMIC_INT64_REFERENCE_COUNTING_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT64_REFERENCE_COUNTING_IS_WAIT_FREE
+
+#define Q_ATOMIC_INT64_TEST_AND_SET_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT64_TEST_AND_SET_IS_WAIT_FREE
+
+#define Q_ATOMIC_INT64_FETCH_AND_STORE_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT64_FETCH_AND_STORE_IS_WAIT_FREE
+
+#define Q_ATOMIC_INT64_FETCH_AND_ADD_IS_ALWAYS_NATIVE
+
+template<> struct QAtomicIntegerTraits<int> { enum { IsInteger = 1 }; };
+template<> struct QAtomicIntegerTraits<unsigned int> { enum { IsInteger = 1 }; };
+template<> struct QAtomicIntegerTraits<char> { enum { IsInteger = 1 }; };
+template<> struct QAtomicIntegerTraits<signed char> { enum { IsInteger = 1 }; };
+template<> struct QAtomicIntegerTraits<unsigned char> { enum { IsInteger = 1 }; };
+template<> struct QAtomicIntegerTraits<short> { enum { IsInteger = 1 }; };
+template<> struct QAtomicIntegerTraits<unsigned short> { enum { IsInteger = 1 }; };
+template<> struct QAtomicIntegerTraits<long> { enum { IsInteger = 1 }; };
+template<> struct QAtomicIntegerTraits<unsigned long> { enum { IsInteger = 1 }; };
+template<> struct QAtomicIntegerTraits<long long> { enum { IsInteger = 1 }; };
+template<> struct QAtomicIntegerTraits<unsigned long long> { enum { IsInteger = 1 }; };
+
+template <int size> struct QBasicAtomicOps: QGenericAtomicOps<QBasicAtomicOps<size> >
+{
+ static void orderedMemoryFence();
+
+ template <typename T> static inline
+ T loadAcquire(T &_q_value)
+ {
+ return *static_cast<volatile T *>(&_q_value);
+ }
+
+ template <typename T> static inline
+ void storeRelease(T &_q_value, T newValue)
+ {
+ *static_cast<volatile T *>(&_q_value) = newValue;
+ }
+ static inline bool isReferenceCountingNative() { return true; }
+ static inline bool isReferenceCountingWaitFree() { return size == 4 || size == 8; }
+ template <typename T> static bool ref(T &_q_value);
+ template <typename T> static bool deref(T &_q_value);
+
+ static inline bool isTestAndSetNative() { return true; }
+ static inline bool isTestAndSetWaitFree() { return true; }
+ template <typename T> static bool testAndSetRelaxed(T &_q_value, T expectedValue, T newValue);
+ template <typename T> static bool testAndSetAcquire(T &_q_value, T expectedValue, T newValue);
+ template <typename T> static bool testAndSetRelease(T &_q_value, T expectedValue, T newValue);
+ template <typename T> static bool testAndSetOrdered(T &_q_value, T expectedValue, T newValue);
+
+ static inline bool isFetchAndStoreNative() { return true; }
+ static inline bool isFetchAndStoreWaitFree() { return true; }
+ template <typename T> static T fetchAndStoreRelaxed(T &_q_value, T newValue);
+ template <typename T> static T fetchAndStoreAcquire(T &_q_value, T newValue);
+ template <typename T> static T fetchAndStoreRelease(T &_q_value, T newValue);
+ template <typename T> static T fetchAndStoreOrdered(T &_q_value, T newValue);
+
+ static inline bool isFetchAndAddNative() { return true; }
+ static inline bool isFetchAndAddWaitFree() { return false; }
+ template <typename T> static
+ T fetchAndAddRelaxed(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd);
+ template <typename T> static
+ T fetchAndAddAcquire(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd);
+ template <typename T> static
+ T fetchAndAddRelease(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd);
+ template <typename T> static
+ T fetchAndAddOrdered(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd);
+};
+
+template <typename T> struct QAtomicOps : QBasicAtomicOps<sizeof(T)>
+{
+ typedef T Type;
+};
inline bool _q_ia64_fetchadd_immediate(register int value)
{
@@ -119,14 +197,14 @@ inline bool _q_ia64_fetchadd_immediate(register int value)
// intrinsics provided by the Intel C++ Compiler
#include <ia64intrin.h>
-inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
+template<int size> inline
+void QBasicAtomicOps<size>::orderedMemoryFence()
{
- return static_cast<int>(_InterlockedExchange(&_q_value, newValue));
+ __memory_barrier();
}
-inline int QBasicAtomicInt::fetchAndStoreRelease(int newValue)
+inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
{
- __memory_barrier();
return static_cast<int>(_InterlockedExchange(&_q_value, newValue));
}
@@ -157,12 +235,6 @@ inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
== expectedValue);
}
-inline bool QBasicAtomicInt::testAndSetOrdered(int expectedValue, int newValue)
-{
- __memory_barrier();
- return testAndSetAcquire(expectedValue, newValue);
-}
-
inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
{
if (__builtin_constant_p(valueToAdd)) {
@@ -186,12 +258,6 @@ inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
return _InterlockedExchangeAdd(&_q_value, valueToAdd);
}
-inline int QBasicAtomicInt::fetchAndAddOrdered(int valueToAdd)
-{
- __memory_barrier();
- return fetchAndAddAcquire(valueToAdd);
-}
-
inline bool QBasicAtomicInt::ref()
{
return _InterlockedIncrement(&_q_value) != 0;
@@ -209,13 +275,6 @@ Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreAcquire(T *newValue)
}
template <typename T>
-Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelease(T *newValue)
-{
- __memory_barrier();
- return fetchAndStoreAcquire(newValue);
-}
-
-template <typename T>
Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelaxed(T *expectedValue, T *newValue)
{
register T *expectedValueCopy = expectedValue;
@@ -252,13 +311,6 @@ Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValu
}
template <typename T>
-Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetOrdered(T *expectedValue, T *newValue)
-{
- __memory_barrier();
- return testAndSetAcquire(expectedValue, newValue);
-}
-
-template <typename T>
Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddAcquire(qptrdiff valueToAdd)
{
return (T *)_InterlockedExchangeAdd64((volatile long *)&_q_value,
@@ -273,41 +325,114 @@ Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelease(qptrdiff valueTo
valueToAdd * sizeof(T));
}
-template <typename T>
-Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddOrdered(qptrdiff valueToAdd)
+#elif defined(Q_CC_GNU)
+
+template<int size> inline
+void QBasicAtomicOps<size>::orderedMemoryFence()
{
- __memory_barrier();
- return fetchAndAddAcquire(valueToAdd);
+ asm volatile("mf" ::: "memory");
}
-#else // !Q_CC_INTEL
+template<> template<typename T> inline
+bool QBasicAtomicOps<4>::ref(T &_q_value)
+{
+ T ret;
+ asm volatile("fetchadd4.acq %0=%1,1\n"
+ : "=r" (ret), "+m" (_q_value)
+ :
+ : "memory");
+ return ret != -1;
+}
-# if defined(Q_CC_GNU)
+template<> template<typename T> inline
+bool QBasicAtomicOps<4>::deref(T &_q_value)
+{
+ T ret;
+ asm volatile("fetchadd4.rel %0=%1,-1\n"
+ : "=r" (ret), "+m" (_q_value)
+ :
+ : "memory");
+ return ret != 1;
+}
-inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
+template<> template<typename T> inline
+bool QBasicAtomicOps<8>::ref(T &_q_value)
{
- int ret;
- asm volatile("xchg4 %0=%1,%2\n"
+ T ret;
+ asm volatile("fetchadd8.acq %0=%1,1\n"
: "=r" (ret), "+m" (_q_value)
- : "r" (newValue)
+ :
: "memory");
- return ret;
+ return ret != -1;
}
-inline int QBasicAtomicInt::fetchAndStoreRelease(int newValue)
+template<> template<typename T> inline
+bool QBasicAtomicOps<8>::deref(T &_q_value)
{
- int ret;
- asm volatile("mf\n"
- "xchg4 %0=%1,%2\n"
+ T ret;
+ asm volatile("fetchadd8.rel %0=%1,-1\n"
: "=r" (ret), "+m" (_q_value)
- : "r" (newValue)
+ :
: "memory");
- return ret;
+ return ret != 1;
}
-inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
+template<> template <typename T> inline
+bool QBasicAtomicOps<1>::testAndSetAcquire(T &_q_value, T expectedValue, T newValue)
+{
+ T ret;
+ asm volatile("mov ar.ccv=%2\n"
+ ";;\n"
+ "cmpxchg1.acq %0=%1,%3,ar.ccv\n"
+ : "=r" (ret), "+m" (_q_value)
+ : "r" (expectedValue), "r" (newValue)
+ : "memory");
+ return ret == expectedValue;
+}
+
+template<> template <typename T> inline
+bool QBasicAtomicOps<1>::testAndSetRelease(T &_q_value, T expectedValue, T newValue)
+{
+ T ret;
+ asm volatile("mov ar.ccv=%2\n"
+ ";;\n"
+ "cmpxchg1.rel %0=%1,%3,ar.ccv\n"
+ : "=r" (ret), "+m" (_q_value)
+ : "r" (expectedValue), "r" (newValue)
+ : "memory");
+ return ret == expectedValue;
+}
+
+template<> template <typename T> inline
+bool QBasicAtomicOps<2>::testAndSetAcquire(T &_q_value, T expectedValue, T newValue)
+{
+ T ret;
+ asm volatile("mov ar.ccv=%2\n"
+ ";;\n"
+ "cmpxchg2.acq %0=%1,%3,ar.ccv\n"
+ : "=r" (ret), "+m" (_q_value)
+ : "r" (expectedValue), "r" (newValue)
+ : "memory");
+ return ret == expectedValue;
+}
+
+template<> template <typename T> inline
+bool QBasicAtomicOps<2>::testAndSetRelease(T &_q_value, T expectedValue, T newValue)
{
- int ret;
+ T ret;
+ asm volatile("mov ar.ccv=%2\n"
+ ";;\n"
+ "cmpxchg2.rel %0=%1,%3,ar.ccv\n"
+ : "=r" (ret), "+m" (_q_value)
+ : "r" (expectedValue), "r" (newValue)
+ : "memory");
+ return ret == expectedValue;
+}
+
+template<> template <typename T> inline
+bool QBasicAtomicOps<4>::testAndSetAcquire(T &_q_value, T expectedValue, T newValue)
+{
+ T ret;
asm volatile("mov ar.ccv=%2\n"
";;\n"
"cmpxchg4.acq %0=%1,%3,ar.ccv\n"
@@ -317,9 +442,10 @@ inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
return ret == expectedValue;
}
-inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
+template<> template <typename T> inline
+bool QBasicAtomicOps<4>::testAndSetRelease(T &_q_value, T expectedValue, T newValue)
{
- int ret;
+ T ret;
asm volatile("mov ar.ccv=%2\n"
";;\n"
"cmpxchg4.rel %0=%1,%3,ar.ccv\n"
@@ -329,9 +455,173 @@ inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
return ret == expectedValue;
}
-inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
+template<> template <typename T> inline
+bool QBasicAtomicOps<8>::testAndSetAcquire(T &_q_value, T expectedValue, T newValue)
+{
+ T ret;
+ asm volatile("mov ar.ccv=%2\n"
+ ";;\n"
+ "cmpxchg8.acq %0=%1,%3,ar.ccv\n"
+ : "=r" (ret), "+m" (_q_value)
+ : "r" (expectedValue), "r" (newValue)
+ : "memory");
+ return ret == expectedValue;
+}
+
+template<> template <typename T> inline
+bool QBasicAtomicOps<8>::testAndSetRelease(T &_q_value, T expectedValue, T newValue)
+{
+ T ret;
+ asm volatile("mov ar.ccv=%2\n"
+ ";;\n"
+ "cmpxchg8.rel %0=%1,%3,ar.ccv\n"
+ : "=r" (ret), "+m" (_q_value)
+ : "r" (expectedValue), "r" (newValue)
+ : "memory");
+ return ret == expectedValue;
+}
+
+template<> template <typename T> inline
+T QBasicAtomicOps<1>::fetchAndStoreAcquire(T &_q_value, T newValue)
+{
+ T ret;
+ asm volatile("xchg1 %0=%1,%2\n"
+ : "=r" (ret), "+m" (_q_value)
+ : "r" (newValue)
+ : "memory");
+ return ret;
+}
+
+template<> template <typename T> inline
+T QBasicAtomicOps<2>::fetchAndStoreAcquire(T &_q_value, T newValue)
+{
+ T ret;
+ asm volatile("xchg2 %0=%1,%2\n"
+ : "=r" (ret), "+m" (_q_value)
+ : "r" (newValue)
+ : "memory");
+ return ret;
+}
+
+template<> template <typename T> inline
+T QBasicAtomicOps<4>::fetchAndStoreAcquire(T &_q_value, T newValue)
{
- int ret;
+ T ret;
+ asm volatile("xchg4 %0=%1,%2\n"
+ : "=r" (ret), "+m" (_q_value)
+ : "r" (newValue)
+ : "memory");
+ return ret;
+}
+
+template<> template <typename T> inline
+T QBasicAtomicOps<8>::fetchAndStoreAcquire(T &_q_value, T newValue)
+{
+ T ret;
+ asm volatile("xchg8 %0=%1,%2\n"
+ : "=r" (ret), "+m" (_q_value)
+ : "r" (newValue)
+ : "memory");
+ return ret;
+}
+
+template<> template <typename T> inline
+T QBasicAtomicOps<1>::fetchAndAddAcquire(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
+{
+ T ret;
+ valueToAdd *= QAtomicAdditiveType<T>::AddScale;
+
+ ret = _q_value;
+ asm volatile("0:\n"
+ " mov r9=%0\n"
+ " mov ar.ccv=%0\n"
+ " add %0=%0, %2\n"
+ " ;;\n"
+ " cmpxchg1.acq %0=%1,%0,ar.ccv\n"
+ " ;;\n"
+ " cmp.ne p6,p0 = %0, r9\n"
+ "(p6) br.dptk 0b\n"
+ "1:\n"
+ : "+r" (ret), "+m" (_q_value)
+ : "r" (valueToAdd)
+ : "r9", "p6", "memory");
+ return ret;
+}
+
+template<> template <typename T> inline
+T QBasicAtomicOps<1>::fetchAndAddRelease(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
+{
+ T ret;
+ valueToAdd *= QAtomicAdditiveType<T>::AddScale;
+
+ ret = _q_value;
+ asm volatile("0:\n"
+ " mov r9=%0\n"
+ " mov ar.ccv=%0\n"
+ " add %0=%0, %2\n"
+ " ;;\n"
+ " cmpxchg1.rel %0=%1,%0,ar.ccv\n"
+ " ;;\n"
+ " cmp.ne p6,p0 = %0, r9\n"
+ "(p6) br.dptk 0b\n"
+ "1:\n"
+ : "+r" (ret), "+m" (_q_value)
+ : "r" (valueToAdd)
+ : "r9", "p6", "memory");
+ return ret;
+}
+
+template<> template <typename T> inline
+T QBasicAtomicOps<2>::fetchAndAddAcquire(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
+{
+ T ret;
+ valueToAdd *= QAtomicAdditiveType<T>::AddScale;
+
+ ret = _q_value;
+ asm volatile("0:\n"
+ " mov r9=%0\n"
+ " mov ar.ccv=%0\n"
+ " add %0=%0, %2\n"
+ " ;;\n"
+ " cmpxchg2.acq %0=%1,%0,ar.ccv\n"
+ " ;;\n"
+ " cmp.ne p6,p0 = %0, r9\n"
+ "(p6) br.dptk 0b\n"
+ "1:\n"
+ : "+r" (ret), "+m" (_q_value)
+ : "r" (valueToAdd)
+ : "r9", "p6", "memory");
+ return ret;
+}
+
+template<> template <typename T> inline
+T QBasicAtomicOps<2>::fetchAndAddRelease(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
+{
+ T ret;
+ valueToAdd *= QAtomicAdditiveType<T>::AddScale;
+
+ ret = _q_value;
+ asm volatile("0:\n"
+ " mov r9=%0\n"
+ " mov ar.ccv=%0\n"
+ " add %0=%0, %2\n"
+ " ;;\n"
+ " cmpxchg2.rel %0=%1,%0,ar.ccv\n"
+ " ;;\n"
+ " cmp.ne p6,p0 = %0, r9\n"
+ "(p6) br.dptk 0b\n"
+ "1:\n"
+ : "+r" (ret), "+m" (_q_value)
+ : "r" (valueToAdd)
+ : "r9", "p6", "memory");
+ return ret;
+}
+
+template<> template <typename T> inline
+T QBasicAtomicOps<4>::fetchAndAddAcquire(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
+{
+ T ret;
+ valueToAdd *= QAtomicAdditiveType<T>::AddScale;
#if (__GNUC__ >= 4)
// We implement a fast fetch-and-add when we can
@@ -362,9 +652,11 @@ inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
return ret;
}
-inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
+template<> template <typename T> inline
+T QBasicAtomicOps<4>::fetchAndAddRelease(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
{
- int ret;
+ T ret;
+ valueToAdd *= QAtomicAdditiveType<T>::AddScale;
#if (__GNUC__ >= 4)
// We implement a fast fetch-and-add when we can
@@ -395,92 +687,18 @@ inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
return ret;
}
-inline int QBasicAtomicInt::fetchAndAddOrdered(int valueToAdd)
-{
- asm volatile("mf" ::: "memory");
- return fetchAndAddRelease(valueToAdd);
-}
-
-inline bool QBasicAtomicInt::ref()
-{
- int ret;
- asm volatile("fetchadd4.acq %0=%1,1\n"
- : "=r" (ret), "+m" (_q_value)
- :
- : "memory");
- return ret != -1;
-}
-
-inline bool QBasicAtomicInt::deref()
-{
- int ret;
- asm volatile("fetchadd4.rel %0=%1,-1\n"
- : "=r" (ret), "+m" (_q_value)
- :
- : "memory");
- return ret != 1;
-}
-
-template <typename T>
-Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreAcquire(T *newValue)
-{
- T *ret;
- asm volatile("xchg8 %0=%1,%2\n"
- : "=r" (ret), "+m" (_q_value)
- : "r" (newValue)
- : "memory");
- return ret;
-}
-
-template <typename T>
-Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelease(T *newValue)
+template<> template <typename T> inline
+T QBasicAtomicOps<8>::fetchAndAddAcquire(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
{
- T *ret;
- asm volatile("mf\n"
- "xchg8 %0=%1,%2\n"
- : "=r" (ret), "+m" (_q_value)
- : "r" (newValue)
- : "memory");
- return ret;
-}
-
-template <typename T>
-Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetAcquire(T *expectedValue, T *newValue)
-{
- T *ret;
- asm volatile("mov ar.ccv=%2\n"
- ";;\n"
- "cmpxchg8.acq %0=%1,%3,ar.ccv\n"
- : "=r" (ret), "+m" (_q_value)
- : "r" (expectedValue), "r" (newValue)
- : "memory");
- return ret == expectedValue;
-}
-
-template <typename T>
-Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValue, T *newValue)
-{
- T *ret;
- asm volatile("mov ar.ccv=%2\n"
- ";;\n"
- "cmpxchg8.rel %0=%1,%3,ar.ccv\n"
- : "=r" (ret), "+m" (_q_value)
- : "r" (expectedValue), "r" (newValue)
- : "memory");
- return ret == expectedValue;
-}
-
-template <typename T>
-Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddAcquire(qptrdiff valueToAdd)
-{
- T *ret;
+ T ret;
+ valueToAdd *= QAtomicAdditiveType<T>::AddScale;
#if (__GNUC__ >= 4)
// We implement a fast fetch-and-add when we can
- if (__builtin_constant_p(valueToAdd) && _q_ia64_fetchadd_immediate(valueToAdd * sizeof(T))) {
+ if (__builtin_constant_p(valueToAdd) && _q_ia64_fetchadd_immediate(valueToAdd)) {
asm volatile("fetchadd8.acq %0=%1,%2\n"
: "=r" (ret), "+m" (_q_value)
- : "i" (valueToAdd * sizeof(T))
+ : "i" (valueToAdd)
: "memory");
return ret;
}
@@ -499,22 +717,23 @@ Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddAcquire(qptrdiff valueTo
"(p6) br.dptk 0b\n"
"1:\n"
: "+r" (ret), "+m" (_q_value)
- : "r" (valueToAdd * sizeof(T))
+ : "r" (valueToAdd)
: "r9", "p6", "memory");
return ret;
}
-template <typename T>
-Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelease(qptrdiff valueToAdd)
+template<> template <typename T> inline
+T QBasicAtomicOps<8>::fetchAndAddRelease(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
{
- T *ret;
+ T ret;
+ valueToAdd *= QAtomicAdditiveType<T>::AddScale;
#if (__GNUC__ >= 4)
// We implement a fast fetch-and-add when we can
- if (__builtin_constant_p(valueToAdd) && _q_ia64_fetchadd_immediate(valueToAdd * sizeof(T))) {
+ if (__builtin_constant_p(valueToAdd) && _q_ia64_fetchadd_immediate(valueToAdd)) {
asm volatile("fetchadd8.rel %0=%1,%2\n"
: "=r" (ret), "+m" (_q_value)
- : "i" (valueToAdd * sizeof(T))
+ : "i" (valueToAdd)
: "memory");
return ret;
}
@@ -533,18 +752,11 @@ Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelease(qptrdiff valueTo
"(p6) br.dptk 0b\n"
"1:\n"
: "+r" (ret), "+m" (_q_value)
- : "r" (valueToAdd * sizeof(T))
+ : "r" (valueToAdd)
: "r9", "p6", "memory");
return ret;
}
-template <typename T>
-Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddOrdered(qptrdiff valueToAdd)
-{
- asm volatile("mf" ::: "memory");
- return fetchAndAddRelease(valueToAdd);
-}
-
#elif defined Q_CC_HPACC
QT_BEGIN_INCLUDE_NAMESPACE
@@ -553,257 +765,315 @@ QT_END_INCLUDE_NAMESPACE
#define FENCE (_Asm_fence)(_UP_CALL_FENCE | _UP_SYS_FENCE | _DOWN_CALL_FENCE | _DOWN_SYS_FENCE)
-inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
+template <int size> inline
+void QBasicAtomicOps<size>::orderedMemoryFence()
{
- return _Asm_xchg((_Asm_sz)_SZ_W, &_q_value, (unsigned)newValue,
- (_Asm_ldhint)_LDHINT_NONE, FENCE);
+ _Asm_mf(FENCE);
}
-inline int QBasicAtomicInt::fetchAndStoreRelease(int newValue)
+template<> template<typename T> inline
+bool QBasicAtomicOps<4>::ref(T &_q_value)
{
- _Asm_mf(FENCE);
- return _Asm_xchg((_Asm_sz)_SZ_W, &_q_value, (unsigned)newValue,
- (_Asm_ldhint)_LDHINT_NONE, FENCE);
+ return (T)_Asm_fetchadd((_Asm_fasz)_FASZ_W, (_Asm_sem)_SEM_ACQ,
+ &_q_value, 1, (_Asm_ldhint)_LDHINT_NONE, FENCE) != -1;
}
-inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
+template<> template<typename T> inline
+bool QBasicAtomicOps<4>::deref(T &_q_value)
{
- _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (unsigned)expectedValue, FENCE);
- int ret = _Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_ACQ,
- &_q_value, (unsigned)newValue, (_Asm_ldhint)_LDHINT_NONE);
- return ret == expectedValue;
+ return (T)_Asm_fetchadd((_Asm_fasz)_FASZ_W, (_Asm_sem)_SEM_REL,
+ &_q_value, -1, (_Asm_ldhint)_LDHINT_NONE, FENCE) != 1;
}
-inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
+template<> template<typename T> inline
+bool QBasicAtomicOps<8>::ref(T &_q_value)
{
- _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (unsigned)expectedValue, FENCE);
- int ret = _Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_REL,
- &_q_value, newValue, (_Asm_ldhint)_LDHINT_NONE);
- return ret == expectedValue;
+ return (T)_Asm_fetchadd((_Asm_fasz)_FASZ_D, (_Asm_sem)_SEM_ACQ,
+ &_q_value, 1, (_Asm_ldhint)_LDHINT_NONE, FENCE) != -1;
}
-inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
+template<> template<typename T> inline
+bool QBasicAtomicOps<8>::deref(T &_q_value)
{
- if (valueToAdd == 1)
- return _Asm_fetchadd((_Asm_fasz)_FASZ_W, (_Asm_sem)_SEM_ACQ,
- &_q_value, 1, (_Asm_ldhint)_LDHINT_NONE, FENCE);
- else if (valueToAdd == -1)
- return _Asm_fetchadd((_Asm_fasz)_FASZ_W, (_Asm_sem)_SEM_ACQ,
- &_q_value, -1, (_Asm_ldhint)_LDHINT_NONE, FENCE);
-
- // implement the test-and-set loop
- register int old, ret;
- do {
- old = _q_value;
- _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (unsigned)old, FENCE);
- ret = _Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_ACQ,
- &_q_value, old + valueToAdd, (_Asm_ldhint)_LDHINT_NONE);
- } while (ret != old);
- return old;
+ return (T)_Asm_fetchadd((_Asm_fasz)_FASZ_D, (_Asm_sem)_SEM_REL,
+ &_q_value, -1, (_Asm_ldhint)_LDHINT_NONE, FENCE) != 1;
}
-inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
+template<> template <typename T> inline
+bool QBasicAtomicOps<1>::testAndSetAcquire(T &_q_value, T expectedValue, T newValue)
{
- if (valueToAdd == 1)
- return _Asm_fetchadd((_Asm_fasz)_FASZ_W, (_Asm_sem)_SEM_REL,
- &_q_value, 1, (_Asm_ldhint)_LDHINT_NONE, FENCE);
- else if (valueToAdd == -1)
- return _Asm_fetchadd((_Asm_fasz)_FASZ_W, (_Asm_sem)_SEM_REL,
- &_q_value, -1, (_Asm_ldhint)_LDHINT_NONE, FENCE);
-
- // implement the test-and-set loop
- register int old, ret;
- do {
- old = _q_value;
- _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (unsigned)old, FENCE);
- ret = _Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_REL,
- &_q_value, old + valueToAdd, (_Asm_ldhint)_LDHINT_NONE);
- } while (ret != old);
- return old;
+ _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint8)expectedValue, FENCE);
+ T ret = (T)_Asm_cmpxchg((_Asm_sz)_SZ_B, (_Asm_sem)_SEM_ACQ,
+ &_q_value, (quint8)newValue, (_Asm_ldhint)_LDHINT_NONE);
+ return ret == expectedValue;
}
-inline int QBasicAtomicInt::fetchAndAddOrdered(int valueToAdd)
+template<> template <typename T> inline
+bool QBasicAtomicOps<1>::testAndSetRelease(T &_q_value, T expectedValue, T newValue)
{
- _Asm_mf(FENCE);
- return fetchAndAddAcquire(valueToAdd);
+ _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint8)expectedValue, FENCE);
+ T ret = (T)_Asm_cmpxchg((_Asm_sz)_SZ_B, (_Asm_sem)_SEM_REL,
+ &_q_value, (quint8)newValue, (_Asm_ldhint)_LDHINT_NONE);
+ return ret == expectedValue;
}
-inline bool QBasicAtomicInt::ref()
+template<> template <typename T> inline
+bool QBasicAtomicOps<2>::testAndSetAcquire(T &_q_value, T expectedValue, T newValue)
{
- return (int)_Asm_fetchadd((_Asm_fasz)_FASZ_W, (_Asm_sem)_SEM_ACQ,
- &_q_value, 1, (_Asm_ldhint)_LDHINT_NONE, FENCE) != -1;
+ _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint16)expectedValue, FENCE);
+ T ret = (T)_Asm_cmpxchg((_Asm_sz)_SZ_H, (_Asm_sem)_SEM_ACQ,
+ &_q_value, (quint16)newValue, (_Asm_ldhint)_LDHINT_NONE);
+ return ret == expectedValue;
}
-inline bool QBasicAtomicInt::deref()
+template<> template <typename T> inline
+bool QBasicAtomicOps<2>::testAndSetRelease(T &_q_value, T expectedValue, T newValue)
{
- return (int)_Asm_fetchadd((_Asm_fasz)_FASZ_W, (_Asm_sem)_SEM_REL,
- &_q_value, -1, (_Asm_ldhint)_LDHINT_NONE, FENCE) != 1;
+ _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint16)expectedValue, FENCE);
+ T ret = (T)_Asm_cmpxchg((_Asm_sz)_SZ_H, (_Asm_sem)_SEM_REL,
+ &_q_value, (quint16)newValue, (_Asm_ldhint)_LDHINT_NONE);
+ return ret == expectedValue;
}
-template <typename T>
-Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreAcquire(T *newValue)
+template<> template <typename T> inline
+bool QBasicAtomicOps<4>::testAndSetAcquire(T &_q_value, T expectedValue, T newValue)
{
-#ifdef __LP64__
- return (T *)_Asm_xchg((_Asm_sz)_SZ_D, &_q_value, (quint64)newValue,
- (_Asm_ldhint)_LDHINT_NONE, FENCE);
-#else
- return (T *)_Asm_xchg((_Asm_sz)_SZ_W, &_q_value, (quint32)newValue,
- (_Asm_ldhint)_LDHINT_NONE, FENCE);
-#endif
+ _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (unsigned)expectedValue, FENCE);
+ T ret = (T)_Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_ACQ,
+ &_q_value, (unsigned)newValue, (_Asm_ldhint)_LDHINT_NONE);
+ return ret == expectedValue;
}
-template <typename T>
-Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelease(T *newValue)
+template<> template <typename T> inline
+bool QBasicAtomicOps<4>::testAndSetRelease(T &_q_value, T expectedValue, T newValue)
{
- _Asm_mf(FENCE);
- return fetchAndStoreAcquire(newValue);
+ _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (unsigned)expectedValue, FENCE);
+ T ret = (T)_Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_REL,
+ &_q_value, newValue, (_Asm_ldhint)_LDHINT_NONE);
+ return ret == expectedValue;
}
-template <typename T>
-Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetAcquire(T *expectedValue, T *newValue)
+template<> template <typename T> inline
+bool QBasicAtomicOps<8>::testAndSetAcquire(T &_q_value, T expectedValue, T newValue)
{
-#ifdef __LP64__
_Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint64)expectedValue, FENCE);
- T *ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_D, (_Asm_sem)_SEM_ACQ,
- &_q_value, (quint64)newValue, (_Asm_ldhint)_LDHINT_NONE);
-#else
- _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint32)expectedValue, FENCE);
- T *ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_ACQ,
- &_q_value, (quint32)newValue, (_Asm_ldhint)_LDHINT_NONE);
-#endif
+ T ret = (T)_Asm_cmpxchg((_Asm_sz)_SZ_D, (_Asm_sem)_SEM_ACQ,
+ &_q_value, (quint64)newValue, (_Asm_ldhint)_LDHINT_NONE);
return ret == expectedValue;
}
-template <typename T>
-Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValue, T *newValue)
+template<> template <typename T> inline
+bool QBasicAtomicOps<8>::testAndSetRelease(T &_q_value, T expectedValue, T newValue)
{
-#ifdef __LP64__
_Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint64)expectedValue, FENCE);
- T *ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_D, (_Asm_sem)_SEM_REL,
- &_q_value, (quint64)newValue, (_Asm_ldhint)_LDHINT_NONE);
-#else
- _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint32)expectedValue, FENCE);
- T *ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_REL,
- &_q_value, (quint32)newValue, (_Asm_ldhint)_LDHINT_NONE);
-#endif
+ T ret = (T)_Asm_cmpxchg((_Asm_sz)_SZ_D, (_Asm_sem)_SEM_REL,
+ &_q_value, (quint64)newValue, (_Asm_ldhint)_LDHINT_NONE);
return ret == expectedValue;
}
-template <typename T>
-Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddAcquire(qptrdiff valueToAdd)
+template<> template <typename T> inline
+T QBasicAtomicOps<1>::fetchAndStoreAcquire(T &_q_value, T newValue)
{
+ return (T)_Asm_xchg((_Asm_sz)_SZ_B, &_q_value, (quint8)newValue,
+ (_Asm_ldhint)_LDHINT_NONE, FENCE);
+}
+
+template<> template <typename T> inline
+T QBasicAtomicOps<2>::fetchAndStoreAcquire(T &_q_value, T newValue)
+{
+ return (T)_Asm_xchg((_Asm_sz)_SZ_H, &_q_value, (quint16)newValue,
+ (_Asm_ldhint)_LDHINT_NONE, FENCE);
+}
+
+template<> template <typename T> inline
+T QBasicAtomicOps<4>::fetchAndStoreAcquire(T &_q_value, T newValue)
+{
+ return (T)_Asm_xchg((_Asm_sz)_SZ_W, &_q_value, (unsigned)newValue,
+ (_Asm_ldhint)_LDHINT_NONE, FENCE);
+}
+
+template<> template <typename T> inline
+T QBasicAtomicOps<8>::fetchAndStoreAcquire(T &_q_value, T newValue)
+{
+ return (T)_Asm_xchg((_Asm_sz)_SZ_D, &_q_value, (quint64)newValue,
+ (_Asm_ldhint)_LDHINT_NONE, FENCE);
+}

// 1-byte fetch-and-add with acquire semantics.  There is no 1-byte fetchadd
// on IA-64 (see the "no fetchadd for 1 or 2 bytes" note below), so this is
// implemented as a compare-and-swap retry loop.  Returns the pre-add value.
template<> template <typename T> inline
T QBasicAtomicOps<1>::fetchAndAddAcquire(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
{
    // scale the addend per the type's additive unit (presumably sizeof the
    // pointee for pointer types — defined in qgenericatomic.h)
    valueToAdd *= QAtomicAdditiveType<T>::AddScale;
    // implement the test-and-set loop
    register T old, ret;
    do {
        old = _q_value;
        // cmpxchg compares against ar.ccv, loaded here with the old value
        _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint8)old, FENCE);
        ret = _Asm_cmpxchg((_Asm_sz)_SZ_B, (_Asm_sem)_SEM_ACQ,
                           &_q_value, old + valueToAdd, (_Asm_ldhint)_LDHINT_NONE);
    } while (ret != old);   // retry if another thread changed _q_value
    return old;
}
-template <typename T>
-Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelease(qptrdiff valueToAdd)
+template<> template <typename T> inline
+T QBasicAtomicOps<1>::fetchAndAddRelaxed(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
{
// implement the test-and-set loop
- register T *old, *ret;
+ register T old, ret;
do {
old = _q_value;
-#ifdef __LP64__
- _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint64)old, FENCE);
- ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_D, (_Asm_sem)_SEM_REL,
- &_q_value, (quint64)(old + valueToAdd),
- (_Asm_ldhint)_LDHINT_NONE);
-#else
- _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint32)old, FENCE);
- ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_REL,
- &_q_value, (quint32)(old + valueToAdd),
- (_Asm_ldhint)_LDHINT_NONE);
-#endif
- } while (old != ret);
+ _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint8)old, FENCE);
+ ret = _Asm_cmpxchg((_Asm_sz)_SZ_B, (_Asm_sem)_SEM_REL,
+ &_q_value, old + valueToAdd, (_Asm_ldhint)_LDHINT_NONE);
+ } while (ret != old);
return old;
}
// 2-byte fetch-and-add with acquire semantics (cmpxchg retry loop; no
// 2-byte fetchadd on IA-64).  Returns the pre-add value.
template<> template <typename T> inline
T QBasicAtomicOps<2>::fetchAndAddAcquire(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
{
    // scale the addend per the type's additive unit (qgenericatomic.h)
    valueToAdd *= QAtomicAdditiveType<T>::AddScale;
    // implement the test-and-set loop
    register T old, ret;
    do {
        old = _q_value;
        _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint16)old, FENCE);
        ret = _Asm_cmpxchg((_Asm_sz)_SZ_H, (_Asm_sem)_SEM_ACQ,
                           &_q_value, old + valueToAdd, (_Asm_ldhint)_LDHINT_NONE);
    } while (ret != old);
    return old;
}
-#else
+template<> template <typename T> inline
+T QBasicAtomicOps<2>::fetchAndAddRelaxed(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
+{
+ // implement the test-and-set loop
+ register T old, ret;
+ do {
+ old = _q_value;
+ _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint16)old, FENCE);
+ ret = _Asm_cmpxchg((_Asm_sz)_SZ_H, (_Asm_sem)_SEM_REL,
+ &_q_value, old + valueToAdd, (_Asm_ldhint)_LDHINT_NONE);
+ } while (ret != old);
+ return old;
+}
// 4-byte fetch-and-add with acquire semantics, implemented as a cmpxchg
// retry loop.  Returns the pre-add value.
template<> template <typename T> inline
T QBasicAtomicOps<4>::fetchAndAddAcquire(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
{
    // scale the addend per the type's additive unit (qgenericatomic.h)
    valueToAdd *= QAtomicAdditiveType<T>::AddScale;
    // implement the test-and-set loop
    register T old, ret;
    do {
        old = _q_value;
        _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (unsigned)old, FENCE);
        ret = _Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_ACQ,
                           &_q_value, old + valueToAdd, (_Asm_ldhint)_LDHINT_NONE);
    } while (ret != old);
    return old;
}
-#endif
+template<> template <typename T> inline
+T QBasicAtomicOps<4>::fetchAndAddRelaxed(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
+{
+ // implement the test-and-set loop
+ register T old, ret;
+ do {
+ old = _q_value;
+ _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (unsigned)old, FENCE);
+ ret = _Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_REL,
+ &_q_value, old + valueToAdd, (_Asm_ldhint)_LDHINT_NONE);
+ } while (ret != old);
+ return old;
+}
// 8-byte fetch-and-add with acquire semantics, implemented as a cmpxchg
// retry loop.  Returns the pre-add value.
template<> template <typename T> inline
T QBasicAtomicOps<8>::fetchAndAddAcquire(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
{
    // scale the addend per the type's additive unit (qgenericatomic.h)
    valueToAdd *= QAtomicAdditiveType<T>::AddScale;
    // implement the test-and-set loop
    register T old, ret;
    do {
        old = _q_value;
        _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint64)old, FENCE);
        ret = _Asm_cmpxchg((_Asm_sz)_SZ_D, (_Asm_sem)_SEM_ACQ,
                           &_q_value, old + valueToAdd, (_Asm_ldhint)_LDHINT_NONE);
    } while (ret != old);
    return old;
}
-inline bool QBasicAtomicInt::testAndSetOrdered(int expectedValue, int newValue)
+template<> template <typename T> inline
+T QBasicAtomicOps<8>::fetchAndAddRelaxed(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
{
- return testAndSetAcquire(expectedValue, newValue);
+ // implement the test-and-set loop
+ register T old, ret;
+ do {
+ old = _q_value;
+ _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint64)old, FENCE);
+ ret = _Asm_cmpxchg((_Asm_sz)_SZ_D, (_Asm_sem)_SEM_REL,
+ &_q_value, old + valueToAdd, (_Asm_ldhint)_LDHINT_NONE);
+ } while (ret != old);
+ return old;
}
-template <typename T>
-Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelaxed(T *expectedValue, T *newValue)
+#endif
+
+template<int size> template<typename T> inline
+bool QBasicAtomicOps<size>::ref(T &_q_value)
{
- return testAndSetAcquire(expectedValue, newValue);
+ // no fetchadd for 1 or 2 bytes
+ return fetchAndAddRelaxed(_q_value, 1) == -1;
}
-template <typename T>
-Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetOrdered(T *expectedValue, T *newValue)
+template<int size> template<typename T> inline
+bool QBasicAtomicOps<size>::deref(T &_q_value)
{
- return testAndSetAcquire(expectedValue, newValue);
+ // no fetchadd for 1 or 2 bytes
+ return fetchAndAddRelaxed(_q_value, -1) == 1;
}
// Relaxed test-and-set: satisfied by the stronger acquire-ordered version.
template<int size> template <typename T> inline
bool QBasicAtomicOps<size>::testAndSetRelaxed(T &_q_value, T expectedValue, T newValue)
{
    return testAndSetAcquire(_q_value, expectedValue, newValue);
}
// Fully-ordered test-and-set: a fence (orderedMemoryFence, defined earlier
// in this file) followed by the acquire-ordered cmpxchg.
template<int size> template <typename T> inline
bool QBasicAtomicOps<size>::testAndSetOrdered(T &_q_value, T expectedValue, T newValue)
{
    orderedMemoryFence();
    return testAndSetAcquire(_q_value, expectedValue, newValue);
}
// Relaxed exchange: satisfied by the stronger acquire-ordered xchg.
template<int size> template <typename T> inline
T QBasicAtomicOps<size>::fetchAndStoreRelaxed(T &_q_value, T newValue)
{
    return fetchAndStoreAcquire(_q_value, newValue);
}
// Release exchange: fence first (xchg itself only has acquire semantics on
// IA-64), then the acquire-ordered exchange.
template<int size> template <typename T> inline
T QBasicAtomicOps<size>::fetchAndStoreRelease(T &_q_value, T newValue)
{
    orderedMemoryFence();
    return fetchAndStoreAcquire(_q_value, newValue);
}
// Fully-ordered exchange: the release version (fence + acquire xchg) already
// orders both sides.
template<int size> template <typename T> inline
T QBasicAtomicOps<size>::fetchAndStoreOrdered(T &_q_value, T newValue)
{
    return fetchAndStoreRelease(_q_value, newValue);
}
// Relaxed fetch-and-add: satisfied by the stronger acquire-ordered version
// (the size-specific specializations above override this where provided).
template<int size> template <typename T> inline
T QBasicAtomicOps<size>::fetchAndAddRelaxed(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
{
    return fetchAndAddAcquire(_q_value, valueToAdd);
}
// Fully-ordered fetch-and-add: fence first, then the release-ordered add.
template<int size> template <typename T> inline
T QBasicAtomicOps<size>::fetchAndAddOrdered(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
{
    orderedMemoryFence();
    return fetchAndAddRelease(_q_value, valueToAdd);
}
QT_END_NAMESPACE
diff --git a/src/corelib/thread/qbasicatomic.h b/src/corelib/thread/qbasicatomic.h
index 89192a5e91..a39ed0d56a 100644
--- a/src/corelib/thread/qbasicatomic.h
+++ b/src/corelib/thread/qbasicatomic.h
@@ -52,6 +52,8 @@
# include <QtCore/qatomic_arm.h>
#elif defined(__i386) || defined(__i386__)
# include <QtCore/qatomic_i386.h>
+#elif defined(__ia64) || defined(__ia64__)
+# include "QtCore/qatomic_ia64.h"
#elif defined(__mips) || defined(__mips__)
# include "QtCore/qatomic_mips.h"
#elif defined(__x86_64) || defined(__x86_64__) || defined(__amd64)