Diffstat (limited to 'src/corelib/arch')
-rw-r--r--  src/corelib/arch/qatomic_cxx11.h  77
-rw-r--r--  src/corelib/arch/qatomic_gcc.h    12
2 files changed, 51 insertions, 38 deletions
diff --git a/src/corelib/arch/qatomic_cxx11.h b/src/corelib/arch/qatomic_cxx11.h
index 3119edaf45..ed19064724 100644
--- a/src/corelib/arch/qatomic_cxx11.h
+++ b/src/corelib/arch/qatomic_cxx11.h
@@ -105,56 +105,56 @@ template<> struct QAtomicIntegerTraits<char32_t> { enum { IsInteger = 1 }; };
#define Q_ATOMIC_INT64_FETCH_AND_STORE_IS_ALWAYS_NATIVE
#define Q_ATOMIC_INT64_FETCH_AND_ADD_IS_ALWAYS_NATIVE
-template <typename T> struct QAtomicOps
+template <typename X> struct QAtomicOps
{
- typedef std::atomic<T> Type;
- typedef typename QAtomicAdditiveType<T>::AdditiveT _AdditiveType;
- static const int AddScale = QAtomicAdditiveType<T>::AddScale;
+ typedef std::atomic<X> Type;
- static inline
- T load(const Type &_q_value) Q_DECL_NOTHROW
+ template <typename T> static inline
+ T load(const std::atomic<T> &_q_value) Q_DECL_NOTHROW
{
return _q_value.load(std::memory_order_relaxed);
}
- static inline
- T load(const volatile Type &_q_value) Q_DECL_NOTHROW
+ template <typename T> static inline
+ T load(const volatile std::atomic<T> &_q_value) Q_DECL_NOTHROW
{
return _q_value.load(std::memory_order_relaxed);
}
- static inline
- T loadAcquire(const Type &_q_value) Q_DECL_NOTHROW
+ template <typename T> static inline
+ T loadAcquire(const std::atomic<T> &_q_value) Q_DECL_NOTHROW
{
return _q_value.load(std::memory_order_acquire);
}
- static inline
- T loadAcquire(const volatile Type &_q_value) Q_DECL_NOTHROW
+ template <typename T> static inline
+ T loadAcquire(const volatile std::atomic<T> &_q_value) Q_DECL_NOTHROW
{
return _q_value.load(std::memory_order_acquire);
}
- static inline
- void store(Type &_q_value, T newValue) Q_DECL_NOTHROW
+ template <typename T> static inline
+ void store(std::atomic<T> &_q_value, T newValue) Q_DECL_NOTHROW
{
_q_value.store(newValue, std::memory_order_relaxed);
}
- static inline
- void storeRelease(Type &_q_value, T newValue) Q_DECL_NOTHROW
+ template <typename T> static inline
+ void storeRelease(std::atomic<T> &_q_value, T newValue) Q_DECL_NOTHROW
{
_q_value.store(newValue, std::memory_order_release);
}
static inline Q_DECL_CONSTEXPR bool isReferenceCountingNative() Q_DECL_NOTHROW { return true; }
static inline Q_DECL_CONSTEXPR bool isReferenceCountingWaitFree() Q_DECL_NOTHROW { return false; }
- static inline bool ref(Type &_q_value)
+ template <typename T>
+ static inline bool ref(std::atomic<T> &_q_value)
{
return ++_q_value != 0;
}
- static inline bool deref(Type &_q_value) Q_DECL_NOTHROW
+ template <typename T>
+ static inline bool deref(std::atomic<T> &_q_value) Q_DECL_NOTHROW
{
return --_q_value != 0;
}
@@ -162,22 +162,25 @@ template <typename T> struct QAtomicOps
static inline Q_DECL_CONSTEXPR bool isTestAndSetNative() Q_DECL_NOTHROW { return false; }
static inline Q_DECL_CONSTEXPR bool isTestAndSetWaitFree() Q_DECL_NOTHROW { return false; }
- static
+ template <typename T> static
bool testAndSetRelaxed(Type &_q_value, T expectedValue, T newValue) Q_DECL_NOTHROW
{
return _q_value.compare_exchange_strong(expectedValue, newValue, std::memory_order_relaxed);
}
+ template <typename T>
static bool testAndSetAcquire(Type &_q_value, T expectedValue, T newValue) Q_DECL_NOTHROW
{
return _q_value.compare_exchange_strong(expectedValue, newValue, std::memory_order_acquire);
}
+ template <typename T>
static bool testAndSetRelease(Type &_q_value, T expectedValue, T newValue) Q_DECL_NOTHROW
{
return _q_value.compare_exchange_strong(expectedValue, newValue, std::memory_order_release);
}
+ template <typename T>
static bool testAndSetOrdered(Type &_q_value, T expectedValue, T newValue) Q_DECL_NOTHROW
{
return _q_value.compare_exchange_strong(expectedValue, newValue, std::memory_order_acq_rel);
@@ -186,22 +189,26 @@ template <typename T> struct QAtomicOps
static inline Q_DECL_CONSTEXPR bool isFetchAndStoreNative() Q_DECL_NOTHROW { return false; }
static inline Q_DECL_CONSTEXPR bool isFetchAndStoreWaitFree() Q_DECL_NOTHROW { return false; }
- static T fetchAndStoreRelaxed(Type &_q_value, T newValue) Q_DECL_NOTHROW
+ template <typename T>
+ static T fetchAndStoreRelaxed(std::atomic<T> &_q_value, T newValue) Q_DECL_NOTHROW
{
return _q_value.exchange(newValue, std::memory_order_relaxed);
}
- static T fetchAndStoreAcquire(Type &_q_value, T newValue) Q_DECL_NOTHROW
+ template <typename T>
+ static T fetchAndStoreAcquire(std::atomic<T> &_q_value, T newValue) Q_DECL_NOTHROW
{
return _q_value.exchange(newValue, std::memory_order_acquire);
}
- static T fetchAndStoreRelease(Type &_q_value, T newValue) Q_DECL_NOTHROW
+ template <typename T>
+ static T fetchAndStoreRelease(std::atomic<T> &_q_value, T newValue) Q_DECL_NOTHROW
{
return _q_value.exchange(newValue, std::memory_order_release);
}
- static T fetchAndStoreOrdered(Type &_q_value, T newValue) Q_DECL_NOTHROW
+ template <typename T>
+ static T fetchAndStoreOrdered(std::atomic<T> &_q_value, T newValue) Q_DECL_NOTHROW
{
return _q_value.exchange(newValue, std::memory_order_acq_rel);
}
@@ -209,31 +216,31 @@ template <typename T> struct QAtomicOps
static inline Q_DECL_CONSTEXPR bool isFetchAndAddNative() Q_DECL_NOTHROW { return false; }
static inline Q_DECL_CONSTEXPR bool isFetchAndAddWaitFree() Q_DECL_NOTHROW { return false; }
- static
- T fetchAndAddRelaxed(Type &_q_value, _AdditiveType valueToAdd) Q_DECL_NOTHROW
+ template <typename T> static inline
+ T fetchAndAddRelaxed(std::atomic<T> &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd) Q_DECL_NOTHROW
{
- return _q_value.fetch_add(valueToAdd * AddScale,
+ return _q_value.fetch_add(valueToAdd * QAtomicAdditiveType<T>::AddScale,
std::memory_order_relaxed);
}
- static
- T fetchAndAddAcquire(Type &_q_value, _AdditiveType valueToAdd) Q_DECL_NOTHROW
+ template <typename T> static inline
+ T fetchAndAddAcquire(std::atomic<T> &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd) Q_DECL_NOTHROW
{
- return _q_value.fetch_add(valueToAdd * AddScale,
+ return _q_value.fetch_add(valueToAdd * QAtomicAdditiveType<T>::AddScale,
std::memory_order_acquire);
}
- static
- T fetchAndAddRelease(Type &_q_value, _AdditiveType valueToAdd) Q_DECL_NOTHROW
+ template <typename T> static inline
+ T fetchAndAddRelease(std::atomic<T> &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd) Q_DECL_NOTHROW
{
- return _q_value.fetch_add(valueToAdd * AddScale,
+ return _q_value.fetch_add(valueToAdd * QAtomicAdditiveType<T>::AddScale,
std::memory_order_release);
}
- static
- T fetchAndAddOrdered(Type &_q_value, _AdditiveType valueToAdd) Q_DECL_NOTHROW
+ template <typename T> static inline
+ T fetchAndAddOrdered(std::atomic<T> &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd) Q_DECL_NOTHROW
{
- return _q_value.fetch_add(valueToAdd * AddScale,
+ return _q_value.fetch_add(valueToAdd * QAtomicAdditiveType<T>::AddScale,
std::memory_order_acq_rel);
}
};
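
For readers skimming the hunks above: the operand type moves from the class template parameter into per-function member templates, so each operation accepts any std::atomic<T> rather than only std::atomic<X>. A minimal standalone sketch of that pattern follows (hypothetical names, not Qt code; assumes C++11):

#include <atomic>

// Sketch: the class parameter X no longer fixes the operand type; each
// operation is a member template, with T deduced from the std::atomic<T>
// argument and an explicit memory order per variant.
template <typename X> struct AtomicOpsSketch
{
    typedef std::atomic<X> Type;

    template <typename T>
    static T loadAcquire(const std::atomic<T> &v) noexcept
    { return v.load(std::memory_order_acquire); }

    template <typename T>
    static void storeRelease(std::atomic<T> &v, T newValue) noexcept
    { v.store(newValue, std::memory_order_release); }

    template <typename T>
    static bool testAndSetOrdered(std::atomic<T> &v, T expected, T desired) noexcept
    { return v.compare_exchange_strong(expected, desired,
                                       std::memory_order_acq_rel); }
};

int main()
{
    std::atomic<int> value(0);
    AtomicOpsSketch<long>::storeRelease(value, 42); // X = long, yet T = int works
    int expected = 42;
    return AtomicOpsSketch<long>::testAndSetOrdered(value, expected, 7) ? 0 : 1;
}

Note that the fetchAndAdd hunks make the same move for the former _AdditiveType/AddScale members, spelling them QAtomicAdditiveType<T>::AdditiveT and QAtomicAdditiveType<T>::AddScale inline, since they now depend on the per-call T rather than on X.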
diff --git a/src/corelib/arch/qatomic_gcc.h b/src/corelib/arch/qatomic_gcc.h
index bd296053e5..61e709655d 100644
--- a/src/corelib/arch/qatomic_gcc.h
+++ b/src/corelib/arch/qatomic_gcc.h
@@ -75,16 +75,17 @@ template<> struct QAtomicIntegerTraits<char32_t> { enum { IsInteger = 1 }; };
#define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_SOMETIMES_NATIVE
#define Q_ATOMIC_POINTER_FETCH_AND_ADD_IS_SOMETIMES_NATIVE
-template <typename T> struct QAtomicOps: QGenericAtomicOps<QAtomicOps<T> >
+template <typename X> struct QAtomicOps: QGenericAtomicOps<QAtomicOps<X> >
{
// The GCC intrinsics all have fully-ordered memory semantics, so we define
// only the xxxRelaxed functions. The exception is __sync_lock_test_and_set,
// which has acquire semantics, so we need to define the Release and
// Ordered versions too.
- typedef T Type;
+ typedef X Type;
#ifndef __ia64__
+ template <typename T>
static T loadAcquire(const T &_q_value) Q_DECL_NOTHROW
{
T tmp = _q_value;
@@ -92,6 +93,7 @@ template <typename T> struct QAtomicOps: QGenericAtomicOps<QAtomicOps<T> >
return tmp;
}
+ template <typename T>
static void storeRelease(T &_q_value, T newValue) Q_DECL_NOTHROW
{
__sync_synchronize();
@@ -101,28 +103,32 @@ template <typename T> struct QAtomicOps: QGenericAtomicOps<QAtomicOps<T> >
static Q_DECL_CONSTEXPR bool isTestAndSetNative() Q_DECL_NOTHROW { return false; }
static Q_DECL_CONSTEXPR bool isTestAndSetWaitFree() Q_DECL_NOTHROW { return false; }
+ template <typename T>
static bool testAndSetRelaxed(T &_q_value, T expectedValue, T newValue) Q_DECL_NOTHROW
{
return __sync_bool_compare_and_swap(&_q_value, expectedValue, newValue);
}
+ template <typename T>
static T fetchAndStoreRelaxed(T &_q_value, T newValue) Q_DECL_NOTHROW
{
return __sync_lock_test_and_set(&_q_value, newValue);
}
+ template <typename T>
static T fetchAndStoreRelease(T &_q_value, T newValue) Q_DECL_NOTHROW
{
__sync_synchronize();
return __sync_lock_test_and_set(&_q_value, newValue);
}
+ template <typename T>
static T fetchAndStoreOrdered(T &_q_value, T newValue) Q_DECL_NOTHROW
{
return fetchAndStoreRelease(_q_value, newValue);
}
- static
+ template <typename T> static
T fetchAndAddRelaxed(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd) Q_DECL_NOTHROW
{
return __sync_fetch_and_add(&_q_value, valueToAdd * QAtomicAdditiveType<T>::AddScale);
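
The qatomic_gcc.h hunks apply the same templating move, and the struct's opening comment is visible in the code: __sync_lock_test_and_set has acquire semantics only, so the Release and Ordered exchanges issue a full barrier (__sync_synchronize) first. A standalone sketch of that pattern (hypothetical names, GCC/Clang builtins, not Qt code):

// Sketch of the __sync exchange pattern above (requires GCC or Clang).
template <typename T>
T fetchAndStoreRelaxedSketch(T &v, T newValue)
{
    // __sync_lock_test_and_set is documented as an acquire barrier only.
    return __sync_lock_test_and_set(&v, newValue);
}

template <typename T>
T fetchAndStoreReleaseSketch(T &v, T newValue)
{
    __sync_synchronize();  // full barrier supplies the missing release half
    return __sync_lock_test_and_set(&v, newValue);
}

int main()
{
    int shared = 1;
    int old = fetchAndStoreReleaseSketch(shared, 2);
    return (old == 1 && shared == 2) ? 0 : 1;  // exchange returns the prior value
}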