From 1955353149781a064bc053b86be6e221b159d4ee Mon Sep 17 00:00:00 2001
From: Thiago Macieira
Date: Sun, 31 Jul 2011 19:14:42 -0300
Subject: Port the IA-64 atomics to the new QBasicAtomicXXX architecture

The IA-64 architecture supports the actual memory ordering semantics in
many of its instructions, but not in all of them. We implement the
functions for all operations anyway, so we get the best possible code
output. IA-64 does support proper load-acquire and store-release
semantics, but we need no extra instructions for them: the ABI requires
that a volatile load be an acquire and a volatile store be a release.

The Intel and HP compiler codepaths are rewritten, but untested.
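To illustrate that last guarantee with a sketch (an illustration for this
message only, not part of the file below): both of these functions compile
to a single ordered instruction, with no fence,

    int  loadAcquire(const volatile int &v)          { return v; }     // ld4.acq
    void storeRelease(volatile int &v, int newValue) { v = newValue; } // st4.rel

which is why the new loadAcquire()/storeRelease() members are written as
plain volatile accesses.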
Change-Id: I7aa62a4ec65f63a97d1bbd8418bb2492c2be465f
Reviewed-by: Bradley T. Hughes
Reviewed-by: Thiago Macieira
---
 src/corelib/arch/qatomic_ia64.h | 966 +++++++++++++++++++++++++---------------
 1 file changed, 618 insertions(+), 348 deletions(-)

diff --git a/src/corelib/arch/qatomic_ia64.h b/src/corelib/arch/qatomic_ia64.h
index 8e562e811a..2b7d73fd32 100644
--- a/src/corelib/arch/qatomic_ia64.h
+++ b/src/corelib/arch/qatomic_ia64.h
@@ -1,7 +1,7 @@
 /****************************************************************************
 **
 ** Copyright (C) 2012 Nokia Corporation and/or its subsidiary(-ies).
-** All rights reserved.
+** Copyright (C) 2011 Thiago Macieira
 ** Contact: http://www.qt-project.org/
 **
 ** This file is part of the QtCore module of the Qt Toolkit.
@@ -42,69 +42,147 @@
 #ifndef QATOMIC_IA64_H
 #define QATOMIC_IA64_H
 
+#include <QtCore/qgenericatomic.h>
+
 QT_BEGIN_HEADER
 
 QT_BEGIN_NAMESPACE
 
+#if 0
+#pragma qt_sync_stop_processing
+#endif
+
 #define Q_ATOMIC_INT_REFERENCE_COUNTING_IS_ALWAYS_NATIVE
 #define Q_ATOMIC_INT_REFERENCE_COUNTING_IS_WAIT_FREE
 
-inline bool QBasicAtomicInt::isReferenceCountingNative()
-{ return true; }
-inline bool QBasicAtomicInt::isReferenceCountingWaitFree()
-{ return true; }
-
 #define Q_ATOMIC_INT_TEST_AND_SET_IS_ALWAYS_NATIVE
 #define Q_ATOMIC_INT_TEST_AND_SET_IS_WAIT_FREE
 
-inline bool QBasicAtomicInt::isTestAndSetNative()
-{ return true; }
-inline bool QBasicAtomicInt::isTestAndSetWaitFree()
-{ return true; }
-
 #define Q_ATOMIC_INT_FETCH_AND_STORE_IS_ALWAYS_NATIVE
 #define Q_ATOMIC_INT_FETCH_AND_STORE_IS_WAIT_FREE
 
-inline bool QBasicAtomicInt::isFetchAndStoreNative()
-{ return true; }
-inline bool QBasicAtomicInt::isFetchAndStoreWaitFree()
-{ return true; }
-
 #define Q_ATOMIC_INT_FETCH_AND_ADD_IS_ALWAYS_NATIVE
 
-inline bool QBasicAtomicInt::isFetchAndAddNative()
-{ return true; }
-inline bool QBasicAtomicInt::isFetchAndAddWaitFree()
-{ return false; }
+#define Q_ATOMIC_INT32_IS_SUPPORTED
+
+#define Q_ATOMIC_INT32_REFERENCE_COUNTING_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT32_REFERENCE_COUNTING_IS_WAIT_FREE
+
+#define Q_ATOMIC_INT32_TEST_AND_SET_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT32_TEST_AND_SET_IS_WAIT_FREE
+
+#define Q_ATOMIC_INT32_FETCH_AND_STORE_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT32_FETCH_AND_STORE_IS_WAIT_FREE
+
+#define Q_ATOMIC_INT32_FETCH_AND_ADD_IS_ALWAYS_NATIVE
 
 #define Q_ATOMIC_POINTER_TEST_AND_SET_IS_ALWAYS_NATIVE
 #define Q_ATOMIC_POINTER_TEST_AND_SET_IS_WAIT_FREE
 
-template <typename T>
-Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetNative()
-{ return true; }
-template <typename T>
-Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetWaitFree()
-{ return true; }
-
 #define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_ALWAYS_NATIVE
 #define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_WAIT_FREE
 
-template <typename T>
-Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreNative()
-{ return true; }
-template <typename T>
-Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreWaitFree()
-{ return true; }
-
 #define Q_ATOMIC_POINTER_FETCH_AND_ADD_IS_ALWAYS_NATIVE
 
-template <typename T>
-Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddNative()
-{ return true; }
-template <typename T>
-Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddWaitFree()
-{ return false; }
+#define Q_ATOMIC_INT8_IS_SUPPORTED
+
+#define Q_ATOMIC_INT8_REFERENCE_COUNTING_IS_ALWAYS_NATIVE
+
+#define Q_ATOMIC_INT8_TEST_AND_SET_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT8_TEST_AND_SET_IS_WAIT_FREE
+
+#define Q_ATOMIC_INT8_FETCH_AND_STORE_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT8_FETCH_AND_STORE_IS_WAIT_FREE
+
+#define Q_ATOMIC_INT8_FETCH_AND_ADD_IS_ALWAYS_NATIVE
+
+#define Q_ATOMIC_INT16_IS_SUPPORTED
+
+#define Q_ATOMIC_INT16_REFERENCE_COUNTING_IS_ALWAYS_NATIVE
+
+#define Q_ATOMIC_INT16_TEST_AND_SET_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT16_TEST_AND_SET_IS_WAIT_FREE
+
+#define Q_ATOMIC_INT16_FETCH_AND_STORE_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT16_FETCH_AND_STORE_IS_WAIT_FREE
+
+#define Q_ATOMIC_INT16_FETCH_AND_ADD_IS_ALWAYS_NATIVE
+
+#define Q_ATOMIC_INT64_IS_SUPPORTED
+
+#define Q_ATOMIC_INT64_REFERENCE_COUNTING_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT64_REFERENCE_COUNTING_IS_WAIT_FREE
+
+#define Q_ATOMIC_INT64_TEST_AND_SET_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT64_TEST_AND_SET_IS_WAIT_FREE
+
+#define Q_ATOMIC_INT64_FETCH_AND_STORE_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT64_FETCH_AND_STORE_IS_WAIT_FREE
+
+#define Q_ATOMIC_INT64_FETCH_AND_ADD_IS_ALWAYS_NATIVE
+
+template<> struct QAtomicIntegerTraits<char> { enum { IsInteger = 1 }; };
+template<> struct QAtomicIntegerTraits<signed char> { enum { IsInteger = 1 }; };
+template<> struct QAtomicIntegerTraits<unsigned char> { enum { IsInteger = 1 }; };
+template<> struct QAtomicIntegerTraits<short> { enum { IsInteger = 1 }; };
+template<> struct QAtomicIntegerTraits<unsigned short> { enum { IsInteger = 1 }; };
+template<> struct QAtomicIntegerTraits<int> { enum { IsInteger = 1 }; };
+template<> struct QAtomicIntegerTraits<unsigned int> { enum { IsInteger = 1 }; };
+template<> struct QAtomicIntegerTraits<long> { enum { IsInteger = 1 }; };
+template<> struct QAtomicIntegerTraits<unsigned long> { enum { IsInteger = 1 }; };
+template<> struct QAtomicIntegerTraits<long long> { enum { IsInteger = 1 }; };
+template<> struct QAtomicIntegerTraits<unsigned long long> { enum { IsInteger = 1 }; };
+
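+// The operations below are implemented once per access size rather than
+// once per type: QAtomicOps<T> simply forwards to QBasicAtomicOps<sizeof(T)>,
+// and whatever a given size does not provide natively is inherited from
+// the QGenericAtomicOps base.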
+template <int size> struct QBasicAtomicOps: QGenericAtomicOps<QBasicAtomicOps<size> >
+{
+    static void orderedMemoryFence();
+
+    template <typename T> static inline
+    T loadAcquire(T &_q_value)
+    {
+        return *static_cast<const volatile T *>(&_q_value);
+    }
+
+    template <typename T> static inline
+    void storeRelease(T &_q_value, T newValue)
+    {
+        *static_cast<volatile T *>(&_q_value) = newValue;
+    }
+
+    static inline bool isReferenceCountingNative() { return true; }
+    static inline bool isReferenceCountingWaitFree() { return size == 4 || size == 8; }
+    template <typename T> static bool ref(T &_q_value);
+    template <typename T> static bool deref(T &_q_value);
+
+    static inline bool isTestAndSetNative() { return true; }
+    static inline bool isTestAndSetWaitFree() { return true; }
+    template <typename T> static bool testAndSetRelaxed(T &_q_value, T expectedValue, T newValue);
+    template <typename T> static bool testAndSetAcquire(T &_q_value, T expectedValue, T newValue);
+    template <typename T> static bool testAndSetRelease(T &_q_value, T expectedValue, T newValue);
+    template <typename T> static bool testAndSetOrdered(T &_q_value, T expectedValue, T newValue);
+
+    static inline bool isFetchAndStoreNative() { return true; }
+    static inline bool isFetchAndStoreWaitFree() { return true; }
+    template <typename T> static T fetchAndStoreRelaxed(T &_q_value, T newValue);
+    template <typename T> static T fetchAndStoreAcquire(T &_q_value, T newValue);
+    template <typename T> static T fetchAndStoreRelease(T &_q_value, T newValue);
+    template <typename T> static T fetchAndStoreOrdered(T &_q_value, T newValue);
+
+    static inline bool isFetchAndAddNative() { return true; }
+    static inline bool isFetchAndAddWaitFree() { return false; }
+    template <typename T> static
+    T fetchAndAddRelaxed(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd);
+    template <typename T> static
+    T fetchAndAddAcquire(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd);
+    template <typename T> static
+    T fetchAndAddRelease(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd);
+    template <typename T> static
+    T fetchAndAddOrdered(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd);
+};
+
+template <typename T> struct QAtomicOps : QBasicAtomicOps<sizeof(T)>
+{
+    typedef T Type;
+};
 
 inline bool _q_ia64_fetchadd_immediate(register int value)
 {
@@ -119,14 +197,14 @@ inline bool _q_ia64_fetchadd_immediate(register int value)
 // intrinsics provided by the Intel C++ Compiler
 #include <ia64intrin.h>
 
-inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
+template <int size> inline
+void QBasicAtomicOps<size>::orderedMemoryFence()
 {
-    return static_cast<int>(_InterlockedExchange(&_q_value, newValue));
+    __memory_barrier();
 }
 
-inline int QBasicAtomicInt::fetchAndStoreRelease(int newValue)
+inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
 {
-    __memory_barrier();
     return static_cast<int>(_InterlockedExchange(&_q_value, newValue));
 }
 
@@ -157,12 +235,6 @@ inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
                 == expectedValue);
 }
 
-inline bool QBasicAtomicInt::testAndSetOrdered(int expectedValue, int newValue)
-{
-    __memory_barrier();
-    return testAndSetAcquire(expectedValue, newValue);
-}
-
 inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
 {
     if (__builtin_constant_p(valueToAdd)) {
@@ -186,12 +258,6 @@ inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
     return _InterlockedExchangeAdd(&_q_value, valueToAdd);
 }
 
-inline int QBasicAtomicInt::fetchAndAddOrdered(int valueToAdd)
-{
-    __memory_barrier();
-    return fetchAndAddAcquire(valueToAdd);
-}
-
 inline bool QBasicAtomicInt::ref()
 {
     return _InterlockedIncrement(&_q_value) != 0;
@@ -208,13 +274,6 @@ Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreAcquire(T *newValue)
     return (T *)_InterlockedExchangePointer(reinterpret_cast<void * volatile *>(&_q_value), newValue);
 }
 
-template <typename T>
-Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelease(T *newValue)
-{
-    __memory_barrier();
-    return fetchAndStoreAcquire(newValue);
-}
-
 template <typename T>
 Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelaxed(T *expectedValue, T *newValue)
 {
@@ -251,13 +310,6 @@ Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValu
             == quintptr(expectedValue));
 }
 
-template <typename T>
-Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetOrdered(T *expectedValue, T *newValue)
-{
-    __memory_barrier();
-    return testAndSetAcquire(expectedValue, newValue);
-}
-
 template <typename T>
 Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddAcquire(qptrdiff valueToAdd)
 {
@@ -273,41 +325,114 @@ Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelease(qptrdiff valueTo
                                           valueToAdd * sizeof(T));
 }
 
-template <typename T>
-Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddOrdered(qptrdiff valueToAdd)
+#elif defined(Q_CC_GNU)
+
+template <int size> inline
+void QBasicAtomicOps<size>::orderedMemoryFence()
 {
-    __memory_barrier();
-    return fetchAndAddAcquire(valueToAdd);
+    asm volatile("mf" ::: "memory");
 }
 
-#else // !Q_CC_INTEL
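+// fetchadd returns the contents of the memory location *before* the
+// addition, so an increment leaves the counter non-zero unless the old
+// value was -1, and a decrement leaves it non-zero unless the old value
+// was 1; hence the comparisons below.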
"+m" (_q_value) + : + : "memory"); + return ret != -1; +} -# if defined(Q_CC_GNU) +template<> template inline +bool QBasicAtomicOps<4>::deref(T &_q_value) +{ + T ret; + asm volatile("fetchadd4.rel %0=%1,-1\n" + : "=r" (ret), "+m" (_q_value) + : + : "memory"); + return ret != 1; +} -inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue) +template<> template inline +bool QBasicAtomicOps<8>::ref(T &_q_value) { - int ret; - asm volatile("xchg4 %0=%1,%2\n" + T ret; + asm volatile("fetchadd8.acq %0=%1,1\n" : "=r" (ret), "+m" (_q_value) - : "r" (newValue) + : : "memory"); - return ret; + return ret != -1; } -inline int QBasicAtomicInt::fetchAndStoreRelease(int newValue) +template<> template inline +bool QBasicAtomicOps<8>::deref(T &_q_value) { - int ret; - asm volatile("mf\n" - "xchg4 %0=%1,%2\n" + T ret; + asm volatile("fetchadd8.rel %0=%1,-1\n" : "=r" (ret), "+m" (_q_value) - : "r" (newValue) + : : "memory"); - return ret; + return ret != 1; } -inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue) +template<> template inline +bool QBasicAtomicOps<1>::testAndSetAcquire(T &_q_value, T expectedValue, T newValue) +{ + T ret; + asm volatile("mov ar.ccv=%2\n" + ";;\n" + "cmpxchg1.acq %0=%1,%3,ar.ccv\n" + : "=r" (ret), "+m" (_q_value) + : "r" (expectedValue), "r" (newValue) + : "memory"); + return ret == expectedValue; +} + +template<> template inline +bool QBasicAtomicOps<1>::testAndSetRelease(T &_q_value, T expectedValue, T newValue) +{ + T ret; + asm volatile("mov ar.ccv=%2\n" + ";;\n" + "cmpxchg1.rel %0=%1,%3,ar.ccv\n" + : "=r" (ret), "+m" (_q_value) + : "r" (expectedValue), "r" (newValue) + : "memory"); + return ret == expectedValue; +} + +template<> template inline +bool QBasicAtomicOps<2>::testAndSetAcquire(T &_q_value, T expectedValue, T newValue) +{ + T ret; + asm volatile("mov ar.ccv=%2\n" + ";;\n" + "cmpxchg2.acq %0=%1,%3,ar.ccv\n" + : "=r" (ret), "+m" (_q_value) + : "r" (expectedValue), "r" (newValue) + : "memory"); + return ret == expectedValue; +} + +template<> template inline +bool QBasicAtomicOps<2>::testAndSetRelease(T &_q_value, T expectedValue, T newValue) { - int ret; + T ret; + asm volatile("mov ar.ccv=%2\n" + ";;\n" + "cmpxchg2.rel %0=%1,%3,ar.ccv\n" + : "=r" (ret), "+m" (_q_value) + : "r" (expectedValue), "r" (newValue) + : "memory"); + return ret == expectedValue; +} + +template<> template inline +bool QBasicAtomicOps<4>::testAndSetAcquire(T &_q_value, T expectedValue, T newValue) +{ + T ret; asm volatile("mov ar.ccv=%2\n" ";;\n" "cmpxchg4.acq %0=%1,%3,ar.ccv\n" @@ -317,9 +442,10 @@ inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue) return ret == expectedValue; } -inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue) +template<> template inline +bool QBasicAtomicOps<4>::testAndSetRelease(T &_q_value, T expectedValue, T newValue) { - int ret; + T ret; asm volatile("mov ar.ccv=%2\n" ";;\n" "cmpxchg4.rel %0=%1,%3,ar.ccv\n" @@ -329,9 +455,173 @@ inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue) return ret == expectedValue; } -inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd) +template<> template inline +bool QBasicAtomicOps<8>::testAndSetAcquire(T &_q_value, T expectedValue, T newValue) +{ + T ret; + asm volatile("mov ar.ccv=%2\n" + ";;\n" + "cmpxchg8.acq %0=%1,%3,ar.ccv\n" + : "=r" (ret), "+m" (_q_value) + : "r" (expectedValue), "r" (newValue) + : "memory"); + return ret == expectedValue; +} + +template<> template inline +bool 
+template<> template <typename T> inline
+bool QBasicAtomicOps<1>::testAndSetAcquire(T &_q_value, T expectedValue, T newValue)
+{
+    T ret;
+    asm volatile("mov ar.ccv=%2\n"
+                 ";;\n"
+                 "cmpxchg1.acq %0=%1,%3,ar.ccv\n"
+                 : "=r" (ret), "+m" (_q_value)
+                 : "r" (expectedValue), "r" (newValue)
+                 : "memory");
+    return ret == expectedValue;
+}
+
+template<> template <typename T> inline
+bool QBasicAtomicOps<1>::testAndSetRelease(T &_q_value, T expectedValue, T newValue)
+{
+    T ret;
+    asm volatile("mov ar.ccv=%2\n"
+                 ";;\n"
+                 "cmpxchg1.rel %0=%1,%3,ar.ccv\n"
+                 : "=r" (ret), "+m" (_q_value)
+                 : "r" (expectedValue), "r" (newValue)
+                 : "memory");
+    return ret == expectedValue;
+}
+
+template<> template <typename T> inline
+bool QBasicAtomicOps<2>::testAndSetAcquire(T &_q_value, T expectedValue, T newValue)
+{
+    T ret;
+    asm volatile("mov ar.ccv=%2\n"
+                 ";;\n"
+                 "cmpxchg2.acq %0=%1,%3,ar.ccv\n"
+                 : "=r" (ret), "+m" (_q_value)
+                 : "r" (expectedValue), "r" (newValue)
+                 : "memory");
+    return ret == expectedValue;
+}
+
+template<> template <typename T> inline
+bool QBasicAtomicOps<2>::testAndSetRelease(T &_q_value, T expectedValue, T newValue)
 {
-    int ret;
+    T ret;
+    asm volatile("mov ar.ccv=%2\n"
+                 ";;\n"
+                 "cmpxchg2.rel %0=%1,%3,ar.ccv\n"
+                 : "=r" (ret), "+m" (_q_value)
+                 : "r" (expectedValue), "r" (newValue)
+                 : "memory");
+    return ret == expectedValue;
+}
+
+template<> template <typename T> inline
+bool QBasicAtomicOps<4>::testAndSetAcquire(T &_q_value, T expectedValue, T newValue)
+{
+    T ret;
     asm volatile("mov ar.ccv=%2\n"
                  ";;\n"
                  "cmpxchg4.acq %0=%1,%3,ar.ccv\n"
@@ -317,9 +442,10 @@ inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
     return ret == expectedValue;
 }
 
-inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
+template<> template <typename T> inline
+bool QBasicAtomicOps<4>::testAndSetRelease(T &_q_value, T expectedValue, T newValue)
 {
-    int ret;
+    T ret;
     asm volatile("mov ar.ccv=%2\n"
                  ";;\n"
                  "cmpxchg4.rel %0=%1,%3,ar.ccv\n"
@@ -329,9 +455,173 @@ inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
     return ret == expectedValue;
 }
 
-inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
+template<> template <typename T> inline
+bool QBasicAtomicOps<8>::testAndSetAcquire(T &_q_value, T expectedValue, T newValue)
+{
+    T ret;
+    asm volatile("mov ar.ccv=%2\n"
+                 ";;\n"
+                 "cmpxchg8.acq %0=%1,%3,ar.ccv\n"
+                 : "=r" (ret), "+m" (_q_value)
+                 : "r" (expectedValue), "r" (newValue)
+                 : "memory");
+    return ret == expectedValue;
+}
+
+template<> template <typename T> inline
+bool QBasicAtomicOps<8>::testAndSetRelease(T &_q_value, T expectedValue, T newValue)
+{
+    T ret;
+    asm volatile("mov ar.ccv=%2\n"
+                 ";;\n"
+                 "cmpxchg8.rel %0=%1,%3,ar.ccv\n"
+                 : "=r" (ret), "+m" (_q_value)
+                 : "r" (expectedValue), "r" (newValue)
+                 : "memory");
+    return ret == expectedValue;
+}
+
+template<> template <typename T> inline
+T QBasicAtomicOps<1>::fetchAndStoreAcquire(T &_q_value, T newValue)
+{
+    T ret;
+    asm volatile("xchg1 %0=%1,%2\n"
+                 : "=r" (ret), "+m" (_q_value)
+                 : "r" (newValue)
+                 : "memory");
+    return ret;
+}
+
+template<> template <typename T> inline
+T QBasicAtomicOps<2>::fetchAndStoreAcquire(T &_q_value, T newValue)
+{
+    T ret;
+    asm volatile("xchg2 %0=%1,%2\n"
+                 : "=r" (ret), "+m" (_q_value)
+                 : "r" (newValue)
+                 : "memory");
+    return ret;
+}
+
+template<> template <typename T> inline
+T QBasicAtomicOps<4>::fetchAndStoreAcquire(T &_q_value, T newValue)
 {
-    int ret;
+    T ret;
+    asm volatile("xchg4 %0=%1,%2\n"
+                 : "=r" (ret), "+m" (_q_value)
+                 : "r" (newValue)
+                 : "memory");
+    return ret;
+}
+
+template<> template <typename T> inline
+T QBasicAtomicOps<8>::fetchAndStoreAcquire(T &_q_value, T newValue)
+{
+    T ret;
+    asm volatile("xchg8 %0=%1,%2\n"
+                 : "=r" (ret), "+m" (_q_value)
+                 : "r" (newValue)
+                 : "memory");
+    return ret;
+}
+
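+// fetchadd only exists for 4- and 8-byte operands and a small set of
+// immediate addends, so the versions below loop on cmpxchg until the
+// exchange succeeds. The multiplication by AddScale folds the old
+// "valueToAdd * sizeof(T)" pointer scaling into the addend up front.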
+template<> template <typename T> inline
+T QBasicAtomicOps<1>::fetchAndAddAcquire(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
+{
+    T ret;
+    valueToAdd *= QAtomicAdditiveType<T>::AddScale;
+
+    ret = _q_value;
+    asm volatile("0:\n"
+                 " mov r9=%0\n"
+                 " mov ar.ccv=%0\n"
+                 " add %0=%0, %2\n"
+                 " ;;\n"
+                 " cmpxchg1.acq %0=%1,%0,ar.ccv\n"
+                 " ;;\n"
+                 " cmp.ne p6,p0 = %0, r9\n"
+                 "(p6) br.dptk 0b\n"
+                 "1:\n"
+                 : "+r" (ret), "+m" (_q_value)
+                 : "r" (valueToAdd)
+                 : "r9", "p6", "memory");
+    return ret;
+}
+
+template<> template <typename T> inline
+T QBasicAtomicOps<1>::fetchAndAddRelease(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
+{
+    T ret;
+    valueToAdd *= QAtomicAdditiveType<T>::AddScale;
+
+    ret = _q_value;
+    asm volatile("0:\n"
+                 " mov r9=%0\n"
+                 " mov ar.ccv=%0\n"
+                 " add %0=%0, %2\n"
+                 " ;;\n"
+                 " cmpxchg1.rel %0=%1,%0,ar.ccv\n"
+                 " ;;\n"
+                 " cmp.ne p6,p0 = %0, r9\n"
+                 "(p6) br.dptk 0b\n"
+                 "1:\n"
+                 : "+r" (ret), "+m" (_q_value)
+                 : "r" (valueToAdd)
+                 : "r9", "p6", "memory");
+    return ret;
+}
+
+template<> template <typename T> inline
+T QBasicAtomicOps<2>::fetchAndAddAcquire(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
+{
+    T ret;
+    valueToAdd *= QAtomicAdditiveType<T>::AddScale;
+
+    ret = _q_value;
+    asm volatile("0:\n"
+                 " mov r9=%0\n"
+                 " mov ar.ccv=%0\n"
+                 " add %0=%0, %2\n"
+                 " ;;\n"
+                 " cmpxchg2.acq %0=%1,%0,ar.ccv\n"
+                 " ;;\n"
+                 " cmp.ne p6,p0 = %0, r9\n"
+                 "(p6) br.dptk 0b\n"
+                 "1:\n"
+                 : "+r" (ret), "+m" (_q_value)
+                 : "r" (valueToAdd)
+                 : "r9", "p6", "memory");
+    return ret;
+}
+
+template<> template <typename T> inline
+T QBasicAtomicOps<2>::fetchAndAddRelease(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
+{
+    T ret;
+    valueToAdd *= QAtomicAdditiveType<T>::AddScale;
+
+    ret = _q_value;
+    asm volatile("0:\n"
+                 " mov r9=%0\n"
+                 " mov ar.ccv=%0\n"
+                 " add %0=%0, %2\n"
+                 " ;;\n"
+                 " cmpxchg2.rel %0=%1,%0,ar.ccv\n"
+                 " ;;\n"
+                 " cmp.ne p6,p0 = %0, r9\n"
+                 "(p6) br.dptk 0b\n"
+                 "1:\n"
+                 : "+r" (ret), "+m" (_q_value)
+                 : "r" (valueToAdd)
+                 : "r9", "p6", "memory");
+    return ret;
+}
+
+template<> template <typename T> inline
+T QBasicAtomicOps<4>::fetchAndAddAcquire(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
+{
+    T ret;
+    valueToAdd *= QAtomicAdditiveType<T>::AddScale;
 
 #if (__GNUC__ >= 4)
     // We implement a fast fetch-and-add when we can
@@ -362,9 +652,11 @@ inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
     return ret;
 }
 
-inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
+template<> template <typename T> inline
+T QBasicAtomicOps<4>::fetchAndAddRelease(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
 {
-    int ret;
+    T ret;
+    valueToAdd *= QAtomicAdditiveType<T>::AddScale;
 
 #if (__GNUC__ >= 4)
     // We implement a fast fetch-and-add when we can
@@ -395,92 +687,18 @@ inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
     return ret;
 }
 
-inline int QBasicAtomicInt::fetchAndAddOrdered(int valueToAdd)
-{
-    asm volatile("mf" ::: "memory");
-    return fetchAndAddRelease(valueToAdd);
-}
-
-inline bool QBasicAtomicInt::ref()
-{
-    int ret;
-    asm volatile("fetchadd4.acq %0=%1,1\n"
-                 : "=r" (ret), "+m" (_q_value)
-                 :
-                 : "memory");
-    return ret != -1;
-}
-
-inline bool QBasicAtomicInt::deref()
-{
-    int ret;
-    asm volatile("fetchadd4.rel %0=%1,-1\n"
-                 : "=r" (ret), "+m" (_q_value)
-                 :
-                 : "memory");
-    return ret != 1;
-}
-
-template <typename T>
-Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreAcquire(T *newValue)
-{
-    T *ret;
-    asm volatile("xchg8 %0=%1,%2\n"
-                 : "=r" (ret), "+m" (_q_value)
-                 : "r" (newValue)
-                 : "memory");
-    return ret;
-}
-
-template <typename T>
-Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelease(T *newValue)
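+// fetchadd accepts only the immediate addends -16, -8, -4, -1, 1, 4, 8
+// and 16; that is what _q_ia64_fetchadd_immediate() checks, and anything
+// else falls back to the cmpxchg loop.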
+template<> template <typename T> inline
+T QBasicAtomicOps<8>::fetchAndAddAcquire(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
 {
-    T *ret;
-    asm volatile("mf\n"
-                 "xchg8 %0=%1,%2\n"
-                 : "=r" (ret), "+m" (_q_value)
-                 : "r" (newValue)
-                 : "memory");
-    return ret;
-}
-
-template <typename T>
-Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetAcquire(T *expectedValue, T *newValue)
-{
-    T *ret;
-    asm volatile("mov ar.ccv=%2\n"
-                 ";;\n"
-                 "cmpxchg8.acq %0=%1,%3,ar.ccv\n"
-                 : "=r" (ret), "+m" (_q_value)
-                 : "r" (expectedValue), "r" (newValue)
-                 : "memory");
-    return ret == expectedValue;
-}
-
-template <typename T>
-Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValue, T *newValue)
-{
-    T *ret;
-    asm volatile("mov ar.ccv=%2\n"
-                 ";;\n"
-                 "cmpxchg8.rel %0=%1,%3,ar.ccv\n"
-                 : "=r" (ret), "+m" (_q_value)
-                 : "r" (expectedValue), "r" (newValue)
-                 : "memory");
-    return ret == expectedValue;
-}
-
-template <typename T>
-Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddAcquire(qptrdiff valueToAdd)
-{
-    T *ret;
+    T ret;
+    valueToAdd *= QAtomicAdditiveType<T>::AddScale;
 
 #if (__GNUC__ >= 4)
     // We implement a fast fetch-and-add when we can
-    if (__builtin_constant_p(valueToAdd) && _q_ia64_fetchadd_immediate(valueToAdd * sizeof(T))) {
+    if (__builtin_constant_p(valueToAdd) && _q_ia64_fetchadd_immediate(valueToAdd)) {
        asm volatile("fetchadd8.acq %0=%1,%2\n"
                      : "=r" (ret), "+m" (_q_value)
-                     : "i" (valueToAdd * sizeof(T))
+                     : "i" (valueToAdd)
                      : "memory");
         return ret;
     }
@@ -499,22 +717,23 @@ Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddAcquire(qptrdiff valueTo
                  "(p6) br.dptk 0b\n"
                  "1:\n"
                  : "+r" (ret), "+m" (_q_value)
-                 : "r" (valueToAdd * sizeof(T))
+                 : "r" (valueToAdd)
                  : "r9", "p6", "memory");
     return ret;
 }
 
-template <typename T>
-Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelease(qptrdiff valueToAdd)
+template<> template <typename T> inline
+T QBasicAtomicOps<8>::fetchAndAddRelease(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
 {
-    T *ret;
+    T ret;
+    valueToAdd *= QAtomicAdditiveType<T>::AddScale;
 
 #if (__GNUC__ >= 4)
     // We implement a fast fetch-and-add when we can
-    if (__builtin_constant_p(valueToAdd) && _q_ia64_fetchadd_immediate(valueToAdd * sizeof(T))) {
+    if (__builtin_constant_p(valueToAdd) && _q_ia64_fetchadd_immediate(valueToAdd)) {
         asm volatile("fetchadd8.rel %0=%1,%2\n"
                      : "=r" (ret), "+m" (_q_value)
-                     : "i" (valueToAdd * sizeof(T))
+                     : "i" (valueToAdd)
                      : "memory");
         return ret;
     }
@@ -533,18 +752,11 @@ Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelease(qptrdiff valueTo
                  "(p6) br.dptk 0b\n"
                  "1:\n"
                  : "+r" (ret), "+m" (_q_value)
-                 : "r" (valueToAdd * sizeof(T))
+                 : "r" (valueToAdd)
                  : "r9", "p6", "memory");
     return ret;
 }
 
-template <typename T>
-Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddOrdered(qptrdiff valueToAdd)
-{
-    asm volatile("mf" ::: "memory");
-    return fetchAndAddRelease(valueToAdd);
-}
-
 #elif defined Q_CC_HPACC
 
 QT_BEGIN_INCLUDE_NAMESPACE
@@ -553,257 +765,315 @@ QT_END_INCLUDE_NAMESPACE
 
 #define FENCE (_Asm_fence)(_UP_CALL_FENCE | _UP_SYS_FENCE | _DOWN_CALL_FENCE | _DOWN_SYS_FENCE)
 
-inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
+template <int size> inline
+void QBasicAtomicOps<size>::orderedMemoryFence()
 {
-    return _Asm_xchg((_Asm_sz)_SZ_W, &_q_value, (unsigned)newValue,
-                     (_Asm_ldhint)_LDHINT_NONE, FENCE);
+    _Asm_mf(FENCE);
 }
 
-inline int QBasicAtomicInt::fetchAndStoreRelease(int newValue)
+template<> template <typename T> inline
+bool QBasicAtomicOps<4>::ref(T &_q_value)
 {
-    _Asm_mf(FENCE);
-    return _Asm_xchg((_Asm_sz)_SZ_W, &_q_value, (unsigned)newValue,
-                     (_Asm_ldhint)_LDHINT_NONE, FENCE);
+    return (T)_Asm_fetchadd((_Asm_fasz)_FASZ_W, (_Asm_sem)_SEM_ACQ,
+                            &_q_value, 1, (_Asm_ldhint)_LDHINT_NONE, FENCE) != -1;
 }
 
-inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
+template<> template <typename T> inline
+bool QBasicAtomicOps<4>::deref(T &_q_value)
 {
-    _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (unsigned)expectedValue, FENCE);
-    int ret = _Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_ACQ,
-                           &_q_value, (unsigned)newValue, (_Asm_ldhint)_LDHINT_NONE);
-    return ret == expectedValue;
+    return (T)_Asm_fetchadd((_Asm_fasz)_FASZ_W, (_Asm_sem)_SEM_REL,
+                            &_q_value, -1, (_Asm_ldhint)_LDHINT_NONE, FENCE) != 1;
 }
 
-inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
+template<> template <typename T> inline
+bool QBasicAtomicOps<8>::ref(T &_q_value)
 {
-    _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (unsigned)expectedValue, FENCE);
-    int ret = _Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_REL,
-                           &_q_value, newValue, (_Asm_ldhint)_LDHINT_NONE);
-    return ret == expectedValue;
+    return (T)_Asm_fetchadd((_Asm_fasz)_FASZ_D, (_Asm_sem)_SEM_ACQ,
+                            &_q_value, 1, (_Asm_ldhint)_LDHINT_NONE, FENCE) != -1;
 }
 
-inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
+template<> template <typename T> inline
+bool QBasicAtomicOps<8>::deref(T &_q_value)
 {
-    if (valueToAdd == 1)
-        return _Asm_fetchadd((_Asm_fasz)_FASZ_W, (_Asm_sem)_SEM_ACQ,
-                             &_q_value, 1, (_Asm_ldhint)_LDHINT_NONE, FENCE);
-    else if (valueToAdd == -1)
-        return _Asm_fetchadd((_Asm_fasz)_FASZ_W, (_Asm_sem)_SEM_ACQ,
-                             &_q_value, -1, (_Asm_ldhint)_LDHINT_NONE, FENCE);
-
-    // implement the test-and-set loop
-    register int old, ret;
-    do {
-        old = _q_value;
-        _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (unsigned)old, FENCE);
-        ret = _Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_ACQ,
-                           &_q_value, old + valueToAdd, (_Asm_ldhint)_LDHINT_NONE);
-    } while (ret != old);
-    return old;
+    return (T)_Asm_fetchadd((_Asm_fasz)_FASZ_D, (_Asm_sem)_SEM_REL,
+                            &_q_value, -1, (_Asm_ldhint)_LDHINT_NONE, FENCE) != 1;
 }
 
-inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
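+// The HP aC++ codepath expresses the same instruction sequences through
+// compiler intrinsics: _Asm_mov_to_ar() loads ar.ccv and _Asm_cmpxchg()
+// issues a cmpxchg of the requested size and ordering semantics.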
+template<> template <typename T> inline
+bool QBasicAtomicOps<1>::testAndSetAcquire(T &_q_value, T expectedValue, T newValue)
 {
-    if (valueToAdd == 1)
-        return _Asm_fetchadd((_Asm_fasz)_FASZ_W, (_Asm_sem)_SEM_REL,
-                             &_q_value, 1, (_Asm_ldhint)_LDHINT_NONE, FENCE);
-    else if (valueToAdd == -1)
-        return _Asm_fetchadd((_Asm_fasz)_FASZ_W, (_Asm_sem)_SEM_REL,
-                             &_q_value, -1, (_Asm_ldhint)_LDHINT_NONE, FENCE);
-
-    // implement the test-and-set loop
-    register int old, ret;
-    do {
-        old = _q_value;
-        _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (unsigned)old, FENCE);
-        ret = _Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_REL,
-                           &_q_value, old + valueToAdd, (_Asm_ldhint)_LDHINT_NONE);
-    } while (ret != old);
-    return old;
+    _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint8)expectedValue, FENCE);
+    T ret = (T)_Asm_cmpxchg((_Asm_sz)_SZ_B, (_Asm_sem)_SEM_ACQ,
+                            &_q_value, (quint8)newValue, (_Asm_ldhint)_LDHINT_NONE);
+    return ret == expectedValue;
 }
 
-inline int QBasicAtomicInt::fetchAndAddOrdered(int valueToAdd)
+template<> template <typename T> inline
+bool QBasicAtomicOps<1>::testAndSetRelease(T &_q_value, T expectedValue, T newValue)
 {
-    _Asm_mf(FENCE);
-    return fetchAndAddAcquire(valueToAdd);
+    _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint8)expectedValue, FENCE);
+    T ret = (T)_Asm_cmpxchg((_Asm_sz)_SZ_B, (_Asm_sem)_SEM_REL,
+                            &_q_value, (quint8)newValue, (_Asm_ldhint)_LDHINT_NONE);
+    return ret == expectedValue;
 }
 
-inline bool QBasicAtomicInt::ref()
+template<> template <typename T> inline
+bool QBasicAtomicOps<2>::testAndSetAcquire(T &_q_value, T expectedValue, T newValue)
 {
-    return (int)_Asm_fetchadd((_Asm_fasz)_FASZ_W, (_Asm_sem)_SEM_ACQ,
-                              &_q_value, 1, (_Asm_ldhint)_LDHINT_NONE, FENCE) != -1;
+    _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint16)expectedValue, FENCE);
+    T ret = (T)_Asm_cmpxchg((_Asm_sz)_SZ_H, (_Asm_sem)_SEM_ACQ,
+                            &_q_value, (quint16)newValue, (_Asm_ldhint)_LDHINT_NONE);
+    return ret == expectedValue;
 }
 
-inline bool QBasicAtomicInt::deref()
+template<> template <typename T> inline
+bool QBasicAtomicOps<2>::testAndSetRelease(T &_q_value, T expectedValue, T newValue)
 {
-    return (int)_Asm_fetchadd((_Asm_fasz)_FASZ_W, (_Asm_sem)_SEM_REL,
-                              &_q_value, -1, (_Asm_ldhint)_LDHINT_NONE, FENCE) != 1;
+    _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint16)expectedValue, FENCE);
+    T ret = (T)_Asm_cmpxchg((_Asm_sz)_SZ_H, (_Asm_sem)_SEM_REL,
+                            &_q_value, (quint16)newValue, (_Asm_ldhint)_LDHINT_NONE);
+    return ret == expectedValue;
 }
 
-template <typename T>
-Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreAcquire(T *newValue)
+template<> template <typename T> inline
+bool QBasicAtomicOps<4>::testAndSetAcquire(T &_q_value, T expectedValue, T newValue)
 {
-#ifdef __LP64__
-    return (T *)_Asm_xchg((_Asm_sz)_SZ_D, &_q_value, (quint64)newValue,
-                          (_Asm_ldhint)_LDHINT_NONE, FENCE);
-#else
-    return (T *)_Asm_xchg((_Asm_sz)_SZ_W, &_q_value, (quint32)newValue,
-                          (_Asm_ldhint)_LDHINT_NONE, FENCE);
-#endif
+    _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (unsigned)expectedValue, FENCE);
+    T ret = (T)_Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_ACQ,
+                            &_q_value, (unsigned)newValue, (_Asm_ldhint)_LDHINT_NONE);
+    return ret == expectedValue;
 }
 
-template <typename T>
-Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelease(T *newValue)
+template<> template <typename T> inline
+bool QBasicAtomicOps<4>::testAndSetRelease(T &_q_value, T expectedValue, T newValue)
 {
-    _Asm_mf(FENCE);
-    return fetchAndStoreAcquire(newValue);
+    _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (unsigned)expectedValue, FENCE);
+    T ret = (T)_Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_REL,
+                            &_q_value, newValue, (_Asm_ldhint)_LDHINT_NONE);
+    return ret == expectedValue;
 }
 
-template <typename T>
-Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetAcquire(T *expectedValue, T *newValue)
+template<> template <typename T> inline
+bool QBasicAtomicOps<8>::testAndSetAcquire(T &_q_value, T expectedValue, T newValue)
 {
-#ifdef __LP64__
     _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint64)expectedValue, FENCE);
-    T *ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_D, (_Asm_sem)_SEM_ACQ,
-                               &_q_value, (quint64)newValue, (_Asm_ldhint)_LDHINT_NONE);
-#else
-    _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint32)expectedValue, FENCE);
-    T *ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_ACQ,
-                               &_q_value, (quint32)newValue, (_Asm_ldhint)_LDHINT_NONE);
-#endif
+    T ret = (T)_Asm_cmpxchg((_Asm_sz)_SZ_D, (_Asm_sem)_SEM_ACQ,
+                            &_q_value, (quint64)newValue, (_Asm_ldhint)_LDHINT_NONE);
     return ret == expectedValue;
 }
 
-template <typename T>
-Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValue, T *newValue)
+template<> template <typename T> inline
+bool QBasicAtomicOps<8>::testAndSetRelease(T &_q_value, T expectedValue, T newValue)
 {
-#ifdef __LP64__
     _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint64)expectedValue, FENCE);
-    T *ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_D, (_Asm_sem)_SEM_REL,
-                               &_q_value, (quint64)newValue, (_Asm_ldhint)_LDHINT_NONE);
-#else
-    _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint32)expectedValue, FENCE);
-    T *ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_REL,
-                               &_q_value, (quint32)newValue, (_Asm_ldhint)_LDHINT_NONE);
-#endif
+    T ret = (T)_Asm_cmpxchg((_Asm_sz)_SZ_D, (_Asm_sem)_SEM_REL,
+                            &_q_value, (quint64)newValue, (_Asm_ldhint)_LDHINT_NONE);
     return ret == expectedValue;
 }
 
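+// The xchg instruction always has acquire semantics on IA-64, so only
+// the acquire form of fetch-and-store is implemented natively here; the
+// release and ordered forms are derived from it with a fence further
+// down.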
-template <typename T>
-Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddAcquire(qptrdiff valueToAdd)
+template<> template <typename T> inline
+T QBasicAtomicOps<1>::fetchAndStoreAcquire(T &_q_value, T newValue)
 {
+    return (T)_Asm_xchg((_Asm_sz)_SZ_B, &_q_value, (quint8)newValue,
+                        (_Asm_ldhint)_LDHINT_NONE, FENCE);
+}
+
+template<> template <typename T> inline
+T QBasicAtomicOps<2>::fetchAndStoreAcquire(T &_q_value, T newValue)
+{
+    return (T)_Asm_xchg((_Asm_sz)_SZ_H, &_q_value, (quint16)newValue,
+                        (_Asm_ldhint)_LDHINT_NONE, FENCE);
+}
+
+template<> template <typename T> inline
+T QBasicAtomicOps<4>::fetchAndStoreAcquire(T &_q_value, T newValue)
+{
+    return (T)_Asm_xchg((_Asm_sz)_SZ_W, &_q_value, (unsigned)newValue,
+                        (_Asm_ldhint)_LDHINT_NONE, FENCE);
+}
+
+template<> template <typename T> inline
+T QBasicAtomicOps<8>::fetchAndStoreAcquire(T &_q_value, T newValue)
+{
+    return (T)_Asm_xchg((_Asm_sz)_SZ_D, &_q_value, (quint64)newValue,
+                        (_Asm_ldhint)_LDHINT_NONE, FENCE);
+}
+
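+// No fetchadd form fits these cases, so every HP fetch-and-add variant
+// reloads the current value and retries the cmpxchg until the exchange
+// succeeds.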
+template<> template <typename T> inline
+T QBasicAtomicOps<1>::fetchAndAddAcquire(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
+{
+    valueToAdd *= QAtomicAdditiveType<T>::AddScale;
     // implement the test-and-set loop
-    register T *old, *ret;
+    register T old, ret;
     do {
         old = _q_value;
-#ifdef __LP64__
-        _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint64)old, FENCE);
-        ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_D, (_Asm_sem)_SEM_ACQ,
-                                &_q_value, (quint64)(old + valueToAdd),
-                                (_Asm_ldhint)_LDHINT_NONE);
-#else
-        _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint32)old, FENCE);
-        ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_ACQ,
-                                &_q_value, (quint32)(old + valueToAdd),
-                                (_Asm_ldhint)_LDHINT_NONE);
-#endif
-    } while (old != ret);
+        _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint8)old, FENCE);
+        ret = _Asm_cmpxchg((_Asm_sz)_SZ_B, (_Asm_sem)_SEM_ACQ,
+                           &_q_value, old + valueToAdd, (_Asm_ldhint)_LDHINT_NONE);
+    } while (ret != old);
     return old;
 }
 
-template <typename T>
-Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelease(qptrdiff valueToAdd)
+template<> template <typename T> inline
+T QBasicAtomicOps<1>::fetchAndAddRelaxed(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
 {
     // implement the test-and-set loop
-    register T *old, *ret;
+    register T old, ret;
     do {
         old = _q_value;
-#ifdef __LP64__
-        _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint64)old, FENCE);
-        ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_D, (_Asm_sem)_SEM_REL,
-                                &_q_value, (quint64)(old + valueToAdd),
-                                (_Asm_ldhint)_LDHINT_NONE);
-#else
-        _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint32)old, FENCE);
-        ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_REL,
-                                &_q_value, (quint32)(old + valueToAdd),
-                                (_Asm_ldhint)_LDHINT_NONE);
-#endif
-    } while (old != ret);
+        _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint8)old, FENCE);
+        ret = _Asm_cmpxchg((_Asm_sz)_SZ_B, (_Asm_sem)_SEM_REL,
+                           &_q_value, old + valueToAdd, (_Asm_ldhint)_LDHINT_NONE);
+    } while (ret != old);
     return old;
 }
 
-template <typename T>
-Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddOrdered(qptrdiff valueToAdd)
+template<> template <typename T> inline
+T QBasicAtomicOps<2>::fetchAndAddAcquire(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
 {
-    _Asm_mf(FENCE);
-    return fetchAndAddAcquire(valueToAdd);
+    valueToAdd *= QAtomicAdditiveType<T>::AddScale;
+    // implement the test-and-set loop
+    register T old, ret;
+    do {
+        old = _q_value;
+        _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint16)old, FENCE);
+        ret = _Asm_cmpxchg((_Asm_sz)_SZ_H, (_Asm_sem)_SEM_ACQ,
+                           &_q_value, old + valueToAdd, (_Asm_ldhint)_LDHINT_NONE);
+    } while (ret != old);
+    return old;
 }
 
-#else
+template<> template <typename T> inline
+T QBasicAtomicOps<2>::fetchAndAddRelaxed(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
+{
+    // implement the test-and-set loop
+    register T old, ret;
+    do {
+        old = _q_value;
+        _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint16)old, FENCE);
+        ret = _Asm_cmpxchg((_Asm_sz)_SZ_H, (_Asm_sem)_SEM_REL,
+                           &_q_value, old + valueToAdd, (_Asm_ldhint)_LDHINT_NONE);
+    } while (ret != old);
+    return old;
+}
 
-extern "C" {
-    Q_CORE_EXPORT int q_atomic_test_and_set_int(volatile int *ptr, int expected, int newval);
-    Q_CORE_EXPORT int q_atomic_test_and_set_ptr(volatile void *ptr, void *expected, void *newval);
-} // extern "C"
+template<> template <typename T> inline
+T QBasicAtomicOps<4>::fetchAndAddAcquire(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
+{
+    valueToAdd *= QAtomicAdditiveType<T>::AddScale;
+    // implement the test-and-set loop
+    register T old, ret;
+    do {
+        old = _q_value;
+        _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (unsigned)old, FENCE);
+        ret = _Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_ACQ,
+                           &_q_value, old + valueToAdd, (_Asm_ldhint)_LDHINT_NONE);
+    } while (ret != old);
+    return old;
+}
 
-#endif
+template<> template <typename T> inline
+T QBasicAtomicOps<4>::fetchAndAddRelaxed(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
+{
+    // implement the test-and-set loop
+    register T old, ret;
+    do {
+        old = _q_value;
+        _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (unsigned)old, FENCE);
+        ret = _Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_REL,
+                           &_q_value, old + valueToAdd, (_Asm_ldhint)_LDHINT_NONE);
+    } while (ret != old);
+    return old;
+}
 
-inline bool QBasicAtomicInt::testAndSetRelaxed(int expectedValue, int newValue)
+template<> template <typename T> inline
+T QBasicAtomicOps<8>::fetchAndAddAcquire(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
 {
-    return testAndSetAcquire(expectedValue, newValue);
+    valueToAdd *= QAtomicAdditiveType<T>::AddScale;
+    // implement the test-and-set loop
+    register T old, ret;
+    do {
+        old = _q_value;
+        _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint64)old, FENCE);
+        ret = _Asm_cmpxchg((_Asm_sz)_SZ_D, (_Asm_sem)_SEM_ACQ,
+                           &_q_value, old + valueToAdd, (_Asm_ldhint)_LDHINT_NONE);
+    } while (ret != old);
+    return old;
 }
 
-inline bool QBasicAtomicInt::testAndSetOrdered(int expectedValue, int newValue)
+template<> template <typename T> inline
+T QBasicAtomicOps<8>::fetchAndAddRelaxed(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
 {
-    return testAndSetAcquire(expectedValue, newValue);
+    // implement the test-and-set loop
+    register T old, ret;
+    do {
+        old = _q_value;
+        _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint64)old, FENCE);
+        ret = _Asm_cmpxchg((_Asm_sz)_SZ_D, (_Asm_sem)_SEM_REL,
+                           &_q_value, old + valueToAdd, (_Asm_ldhint)_LDHINT_NONE);
+    } while (ret != old);
+    return old;
 }
 
-template <typename T>
-Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelaxed(T *expectedValue, T *newValue)
+#endif
+
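+// Common fallbacks for everything the compiler-specific sections above
+// do not define: the relaxed variants reuse the acquire implementations
+// (the cheapest ordered form available), and the ordered variants issue
+// a full memory fence first via orderedMemoryFence().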
+template <int size> template <typename T> inline
+bool QBasicAtomicOps<size>::ref(T &_q_value)
 {
-    return testAndSetAcquire(expectedValue, newValue);
+    // no fetchadd for 1 or 2 bytes
+    return fetchAndAddRelaxed(_q_value, 1) != -1;
 }
 
-template <typename T>
-Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetOrdered(T *expectedValue, T *newValue)
+template <int size> template <typename T> inline
+bool QBasicAtomicOps<size>::deref(T &_q_value)
 {
-    return testAndSetAcquire(expectedValue, newValue);
+    // no fetchadd for 1 or 2 bytes
+    return fetchAndAddRelaxed(_q_value, -1) != 1;
 }
 
-#endif // Q_CC_INTEL
+template <int size> template <typename T> inline
+bool QBasicAtomicOps<size>::testAndSetRelaxed(T &_q_value, T expectedValue, T newValue)
+{
+    return testAndSetAcquire(_q_value, expectedValue, newValue);
+}
 
-inline int QBasicAtomicInt::fetchAndStoreRelaxed(int newValue)
+template <int size> template <typename T> inline
+bool QBasicAtomicOps<size>::testAndSetOrdered(T &_q_value, T expectedValue, T newValue)
 {
-    return fetchAndStoreAcquire(newValue);
+    orderedMemoryFence();
+    return testAndSetAcquire(_q_value, expectedValue, newValue);
 }
 
-inline int QBasicAtomicInt::fetchAndStoreOrdered(int newValue)
+template <int size> template <typename T> inline
+T QBasicAtomicOps<size>::fetchAndStoreRelaxed(T &_q_value, T newValue)
 {
-    return fetchAndStoreRelease(newValue);
+    return fetchAndStoreAcquire(_q_value, newValue);
 }
 
-inline int QBasicAtomicInt::fetchAndAddRelaxed(int valueToAdd)
+template <int size> template <typename T> inline
+T QBasicAtomicOps<size>::fetchAndStoreRelease(T &_q_value, T newValue)
 {
-    return fetchAndAddAcquire(valueToAdd);
+    orderedMemoryFence();
+    return fetchAndStoreAcquire(_q_value, newValue);
 }
 
-template <typename T>
-Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelaxed(T *newValue)
+template <int size> template <typename T> inline
+T QBasicAtomicOps<size>::fetchAndStoreOrdered(T &_q_value, T newValue)
 {
-    return fetchAndStoreAcquire(newValue);
+    return fetchAndStoreRelease(_q_value, newValue);
 }
 
-template <typename T>
-Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreOrdered(T *newValue)
+template <int size> template <typename T> inline
+T QBasicAtomicOps<size>::fetchAndAddRelaxed(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
 {
-    return fetchAndStoreRelaxed(newValue);
+    return fetchAndAddAcquire(_q_value, valueToAdd);
 }
 
-template <typename T>
-Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelaxed(qptrdiff valueToAdd)
+template <int size> template <typename T> inline
+T QBasicAtomicOps<size>::fetchAndAddOrdered(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
 {
-    return fetchAndAddAcquire(valueToAdd);
+    orderedMemoryFence();
+    return fetchAndAddRelease(_q_value, valueToAdd);
 }
 
 QT_END_NAMESPACE