From cfa5c1698d357adb6c20716e60d51dc41c55ab7e Mon Sep 17 00:00:00 2001 From: Thiago Macieira <thiago.macieira@intel.com> Date: Mon, 16 Sep 2013 10:59:31 -0500 Subject: Fix the C++11 and GCC-atomic intrinsics when not using GCC MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Both Clang and ICC complain about the use of those atomics when used with a forward-declared pointee. GCC doesn't, which makes me think it's a GCC bug. When using QBasicAtomicPointer<Foo> with these atomics, the _q_value member causes the instantiation of QAtomicOps<Foo*>, which causes the instantiation of the regular member function QAtomicOps<Foo*>::fetchAndAddRelaxed. The problem is that function takes a QAtomicAdditiveType<Foo*>::AdditiveT as parameter, which requires sizeof(Foo). Clang 3.3 and ICC 14 correctly expand and complain. GCC 4.7-4.9 apparently don't. The fix is to apply the same trick we used for the other atomics: change all ops functions (including fetchAndAddRelaxed) to be member templates. That way, they can't be expanded until the actual use. 
Clang errors: qgenericatomic.h:73:33: error: invalid application of 'sizeof' to an incomplete type 'QMutexData' qatomic_gcc.h:136:48: note: in instantiation of template class 'QAtomicAdditiveType' requested here qbasicatomic.h:272:22: note: in instantiation of template class 'QAtomicOps' requested here ICC errors: qgenericatomic.h(73): error: incomplete type is not allowed detected during: instantiation of class "QAtomicAdditiveType [with T=QMutexData]" at line 111 of "qatomic_cxx11.h" instantiation of class "QAtomicOps [with T=QMutexData *]" at line 272 of "qbasicatomic.h" Found-by: Tor Arne Change-Id: I9b10648cd47109a943b34a4c9926d77cd0c4fe12 Reviewed-by: Tor Arne Vestbø Reviewed-by: Olivier Goffart --- src/corelib/arch/qatomic_cxx11.h | 77 ++++++++++++++++++++++------------------ src/corelib/arch/qatomic_gcc.h | 12 +++++-- 2 files changed, 51 insertions(+), 38 deletions(-) (limited to 'src/corelib/arch') diff --git a/src/corelib/arch/qatomic_cxx11.h b/src/corelib/arch/qatomic_cxx11.h index 3119edaf45..ed19064724 100644 --- a/src/corelib/arch/qatomic_cxx11.h +++ b/src/corelib/arch/qatomic_cxx11.h @@ -105,56 +105,56 @@ template<> struct QAtomicIntegerTraits { enum { IsInteger = 1 }; }; #define Q_ATOMIC_INT64_FETCH_AND_STORE_IS_ALWAYS_NATIVE #define Q_ATOMIC_INT64_FETCH_AND_ADD_IS_ALWAYS_NATIVE -template struct QAtomicOps +template struct QAtomicOps { - typedef std::atomic Type; - typedef typename QAtomicAdditiveType::AdditiveT _AdditiveType; - static const int AddScale = QAtomicAdditiveType::AddScale; + typedef std::atomic Type; - static inline - T load(const Type &_q_value) Q_DECL_NOTHROW + template static inline + T load(const std::atomic &_q_value) Q_DECL_NOTHROW { return _q_value.load(std::memory_order_relaxed); } - static inline - T load(const volatile Type &_q_value) Q_DECL_NOTHROW + template static inline + T load(const volatile std::atomic &_q_value) Q_DECL_NOTHROW { return _q_value.load(std::memory_order_relaxed); } - static inline - T loadAcquire(const 
Type &_q_value) Q_DECL_NOTHROW + template static inline + T loadAcquire(const std::atomic &_q_value) Q_DECL_NOTHROW { return _q_value.load(std::memory_order_acquire); } - static inline - T loadAcquire(const volatile Type &_q_value) Q_DECL_NOTHROW + template static inline + T loadAcquire(const volatile std::atomic &_q_value) Q_DECL_NOTHROW { return _q_value.load(std::memory_order_acquire); } - static inline - void store(Type &_q_value, T newValue) Q_DECL_NOTHROW + template static inline + void store(std::atomic &_q_value, T newValue) Q_DECL_NOTHROW { _q_value.store(newValue, std::memory_order_relaxed); } - static inline - void storeRelease(Type &_q_value, T newValue) Q_DECL_NOTHROW + template static inline + void storeRelease(std::atomic &_q_value, T newValue) Q_DECL_NOTHROW { _q_value.store(newValue, std::memory_order_release); } static inline Q_DECL_CONSTEXPR bool isReferenceCountingNative() Q_DECL_NOTHROW { return true; } static inline Q_DECL_CONSTEXPR bool isReferenceCountingWaitFree() Q_DECL_NOTHROW { return false; } - static inline bool ref(Type &_q_value) + template + static inline bool ref(std::atomic &_q_value) { return ++_q_value != 0; } - static inline bool deref(Type &_q_value) Q_DECL_NOTHROW + template + static inline bool deref(std::atomic &_q_value) Q_DECL_NOTHROW { return --_q_value != 0; } @@ -162,22 +162,25 @@ template struct QAtomicOps static inline Q_DECL_CONSTEXPR bool isTestAndSetNative() Q_DECL_NOTHROW { return false; } static inline Q_DECL_CONSTEXPR bool isTestAndSetWaitFree() Q_DECL_NOTHROW { return false; } - static + template static bool testAndSetRelaxed(Type &_q_value, T expectedValue, T newValue) Q_DECL_NOTHROW { return _q_value.compare_exchange_strong(expectedValue, newValue, std::memory_order_relaxed); } + template static bool testAndSetAcquire(Type &_q_value, T expectedValue, T newValue) Q_DECL_NOTHROW { return _q_value.compare_exchange_strong(expectedValue, newValue, std::memory_order_acquire); } + template static bool 
testAndSetRelease(Type &_q_value, T expectedValue, T newValue) Q_DECL_NOTHROW { return _q_value.compare_exchange_strong(expectedValue, newValue, std::memory_order_release); } + template static bool testAndSetOrdered(Type &_q_value, T expectedValue, T newValue) Q_DECL_NOTHROW { return _q_value.compare_exchange_strong(expectedValue, newValue, std::memory_order_acq_rel); @@ -186,22 +189,26 @@ template struct QAtomicOps static inline Q_DECL_CONSTEXPR bool isFetchAndStoreNative() Q_DECL_NOTHROW { return false; } static inline Q_DECL_CONSTEXPR bool isFetchAndStoreWaitFree() Q_DECL_NOTHROW { return false; } - static T fetchAndStoreRelaxed(Type &_q_value, T newValue) Q_DECL_NOTHROW + template + static T fetchAndStoreRelaxed(std::atomic &_q_value, T newValue) Q_DECL_NOTHROW { return _q_value.exchange(newValue, std::memory_order_relaxed); } - static T fetchAndStoreAcquire(Type &_q_value, T newValue) Q_DECL_NOTHROW + template + static T fetchAndStoreAcquire(std::atomic &_q_value, T newValue) Q_DECL_NOTHROW { return _q_value.exchange(newValue, std::memory_order_acquire); } - static T fetchAndStoreRelease(Type &_q_value, T newValue) Q_DECL_NOTHROW + template + static T fetchAndStoreRelease(std::atomic &_q_value, T newValue) Q_DECL_NOTHROW { return _q_value.exchange(newValue, std::memory_order_release); } - static T fetchAndStoreOrdered(Type &_q_value, T newValue) Q_DECL_NOTHROW + template + static T fetchAndStoreOrdered(std::atomic &_q_value, T newValue) Q_DECL_NOTHROW { return _q_value.exchange(newValue, std::memory_order_acq_rel); } @@ -209,31 +216,31 @@ template struct QAtomicOps static inline Q_DECL_CONSTEXPR bool isFetchAndAddNative() Q_DECL_NOTHROW { return false; } static inline Q_DECL_CONSTEXPR bool isFetchAndAddWaitFree() Q_DECL_NOTHROW { return false; } - static - T fetchAndAddRelaxed(Type &_q_value, _AdditiveType valueToAdd) Q_DECL_NOTHROW + template static inline + T fetchAndAddRelaxed(std::atomic &_q_value, typename QAtomicAdditiveType::AdditiveT valueToAdd) 
Q_DECL_NOTHROW { - return _q_value.fetch_add(valueToAdd * AddScale, + return _q_value.fetch_add(valueToAdd * QAtomicAdditiveType::AddScale, std::memory_order_relaxed); } - static - T fetchAndAddAcquire(Type &_q_value, _AdditiveType valueToAdd) Q_DECL_NOTHROW + template static inline + T fetchAndAddAcquire(std::atomic &_q_value, typename QAtomicAdditiveType::AdditiveT valueToAdd) Q_DECL_NOTHROW { - return _q_value.fetch_add(valueToAdd * AddScale, + return _q_value.fetch_add(valueToAdd * QAtomicAdditiveType::AddScale, std::memory_order_acquire); } - static - T fetchAndAddRelease(Type &_q_value, _AdditiveType valueToAdd) Q_DECL_NOTHROW + template static inline + T fetchAndAddRelease(std::atomic &_q_value, typename QAtomicAdditiveType::AdditiveT valueToAdd) Q_DECL_NOTHROW { - return _q_value.fetch_add(valueToAdd * AddScale, + return _q_value.fetch_add(valueToAdd * QAtomicAdditiveType::AddScale, std::memory_order_release); } - static - T fetchAndAddOrdered(Type &_q_value, _AdditiveType valueToAdd) Q_DECL_NOTHROW + template static inline + T fetchAndAddOrdered(std::atomic &_q_value, typename QAtomicAdditiveType::AdditiveT valueToAdd) Q_DECL_NOTHROW { - return _q_value.fetch_add(valueToAdd * AddScale, + return _q_value.fetch_add(valueToAdd * QAtomicAdditiveType::AddScale, std::memory_order_acq_rel); } }; diff --git a/src/corelib/arch/qatomic_gcc.h b/src/corelib/arch/qatomic_gcc.h index bd296053e5..61e709655d 100644 --- a/src/corelib/arch/qatomic_gcc.h +++ b/src/corelib/arch/qatomic_gcc.h @@ -75,16 +75,17 @@ template<> struct QAtomicIntegerTraits { enum { IsInteger = 1 }; }; #define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_SOMETIMES_NATIVE #define Q_ATOMIC_POINTER_FETCH_AND_ADD_IS_SOMETIMES_NATIVE -template struct QAtomicOps: QGenericAtomicOps > +template struct QAtomicOps: QGenericAtomicOps > { // The GCC intrinsics all have fully-ordered memory semantics, so we define // only the xxxRelaxed functions. 
The exception is __sync_lock_and_test, // which has acquire semantics, so we need to define the Release and // Ordered versions too. - typedef T Type; + typedef X Type; #ifndef __ia64__ + template static T loadAcquire(const T &_q_value) Q_DECL_NOTHROW { T tmp = _q_value; @@ -92,6 +93,7 @@ template struct QAtomicOps: QGenericAtomicOps > return tmp; } + template static void storeRelease(T &_q_value, T newValue) Q_DECL_NOTHROW { __sync_synchronize(); @@ -101,28 +103,32 @@ template struct QAtomicOps: QGenericAtomicOps > static Q_DECL_CONSTEXPR bool isTestAndSetNative() Q_DECL_NOTHROW { return false; } static Q_DECL_CONSTEXPR bool isTestAndSetWaitFree() Q_DECL_NOTHROW { return false; } + template static bool testAndSetRelaxed(T &_q_value, T expectedValue, T newValue) Q_DECL_NOTHROW { return __sync_bool_compare_and_swap(&_q_value, expectedValue, newValue); } + template static T fetchAndStoreRelaxed(T &_q_value, T newValue) Q_DECL_NOTHROW { return __sync_lock_test_and_set(&_q_value, newValue); } + template static T fetchAndStoreRelease(T &_q_value, T newValue) Q_DECL_NOTHROW { __sync_synchronize(); return __sync_lock_test_and_set(&_q_value, newValue); } + template static T fetchAndStoreOrdered(T &_q_value, T newValue) Q_DECL_NOTHROW { return fetchAndStoreRelease(_q_value, newValue); } - static + template static T fetchAndAddRelaxed(T &_q_value, typename QAtomicAdditiveType::AdditiveT valueToAdd) Q_DECL_NOTHROW { return __sync_fetch_and_add(&_q_value, valueToAdd * QAtomicAdditiveType::AddScale); -- cgit v1.2.3