summaryrefslogtreecommitdiffstats
path: root/src/corelib/thread/qgenericatomic.h
diff options
context:
space:
mode:
authorThiago Macieira <thiago@kde.org>2011-07-31 17:40:40 -0300
committerQt by Nokia <qt-info@nokia.com>2012-01-20 12:26:26 +0100
commit1f843ca39ee59c5304009faf308ddce791614a02 (patch)
tree31e9e0bd87f30ecb26f93846a6380759b231013b /src/corelib/thread/qgenericatomic.h
parentf188d8a005e7c06869aa96bddb82053178c554e9 (diff)
Add the new QBasicAtomicXXX implementation - no backends yet
The new implementation is API- and ABI-compatible with the old implementation, provided that QBasicAtomicInt isn't used as an argument in a function call or the return value: now, QBasicAtomicInt is a typedef to QBasicAtomicInteger<int>. The new design is based on CRTP: the QGenericAtomicOps template class takes as a template parameter the derived class itself. This way, we implement a "poor man's virtual" without a virtual table and everything is inline. QGenericAtomicOps implements most of the atomics code that is repeated in many classes all over: * Acquire semantics are obtained by placing an acquire barrier after the Relaxed operation * Release semantics are obtained by placing a release barrier before the Relaxed operation * Ordered semantics are obtained by placing an ordered barrier before the Relaxed operation (either way would be fine) * fetchAndStoreRelaxed and fetchAndAddRelaxed are implemented on top of testAndSetRelaxed * ref and deref are implemented on top of fetchAndAddRelaxed It also adds load, loadAcquire, store and storeRelease: the default implementations of loadAcquire and storeRelease operate on a volatile variable and add barriers. There are no direct operators for accessing the value. Each architecture-specific implementation can override any of the functions or the memory barrier functions. It must implement at least the testAndSetRelaxed function. In addition, by specialising one template class, the implementations can allow QBasicAtomicInteger for additional types (of different sizes). At the very least, int, unsigned and pointers must be supported. Change-Id: I6da647e225bb330d3cfc16f84d0e7849dff85ec7 Reviewed-by: Bradley T. Hughes <bradley.hughes@nokia.com>
Diffstat (limited to 'src/corelib/thread/qgenericatomic.h')
-rw-r--r--src/corelib/thread/qgenericatomic.h232
1 files changed, 232 insertions, 0 deletions
diff --git a/src/corelib/thread/qgenericatomic.h b/src/corelib/thread/qgenericatomic.h
new file mode 100644
index 0000000000..575589befc
--- /dev/null
+++ b/src/corelib/thread/qgenericatomic.h
@@ -0,0 +1,232 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Thiago Macieira <thiago@kde.org>
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** GNU Lesser General Public License Usage
+** This file may be used under the terms of the GNU Lesser General Public
+** License version 2.1 as published by the Free Software Foundation and
+** appearing in the file LICENSE.LGPL included in the packaging of this
+** file. Please review the following information to ensure the GNU Lesser
+** General Public License version 2.1 requirements will be met:
+** http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU General
+** Public License version 3.0 as published by the Free Software Foundation
+** and appearing in the file LICENSE.GPL included in the packaging of this
+** file. Please review the following information to ensure the GNU General
+** Public License version 3.0 requirements will be met:
+** http://www.gnu.org/copyleft/gpl.html.
+**
+** Other Usage
+** Alternatively, this file may be used in accordance with the terms and
+** conditions contained in a signed written agreement between you and Nokia.
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QGENERICATOMIC_H
+#define QGENERICATOMIC_H
+
+#include <QtCore/qglobal.h>
+
+QT_BEGIN_HEADER
+QT_BEGIN_NAMESPACE
+
+#if 0
+#pragma qt_sync_stop_processing
+#endif
+
+#ifdef Q_CC_GNU
+// lowercase is fine, we'll undef it below
+#define always_inline __attribute__((always_inline, gnu_inline))
+#else
+#define always_inline
+#endif
+
// Trait marking which types are usable with QBasicAtomicInteger. The
// generic case is "not an integer"; each architecture backend specialises
// this with IsInteger = 1 for every integral type it supports.
template <typename T> struct QAtomicIntegerTraits
{
    enum { IsInteger = 0 };
};
+
// Maps an atomic payload type T to the operand type used by the
// fetch-and-add family. For plain integers the operand is T itself and
// additions are applied unscaled (AddScale == 1).
template <typename T> struct QAtomicAdditiveType
{
    typedef T AdditiveT;
    static const int AddScale = 1;
};
// For pointers the operand is a qptrdiff, and callers scale it by the size
// of the pointed-to type so that adding 1 advances by one whole element,
// mirroring ordinary C++ pointer arithmetic.
template <typename T> struct QAtomicAdditiveType<T *>
{
    typedef qptrdiff AdditiveT;
    static const int AddScale = sizeof(T);
};
+
+// not really atomic...
+template <typename BaseClass> struct QGenericAtomicOps
+{
+ template <typename T> struct AtomicType { typedef T Type; typedef T *PointerType; };
+
+ static void acquireMemoryFence() { BaseClass::orderedMemoryFence(); }
+ static void releaseMemoryFence() { BaseClass::orderedMemoryFence(); }
+ static void orderedMemoryFence() { }
+
+ template <typename T> static inline always_inline
+ T load(T &_q_value)
+ {
+ return _q_value;
+ }
+
+ template <typename T> static inline always_inline
+ void store(T &_q_value, T newValue)
+ {
+ _q_value = newValue;
+ }
+
+ template <typename T> static inline always_inline
+ T loadAcquire(T &_q_value)
+ {
+ T tmp = *static_cast<volatile T *>(&_q_value);
+ BaseClass::acquireMemoryFence();
+ return tmp;
+ }
+
+ template <typename T> static inline always_inline
+ void storeRelease(T &_q_value, T newValue)
+ {
+ BaseClass::releaseMemoryFence();
+ *static_cast<volatile T *>(&_q_value) = newValue;
+ }
+
+ static inline bool isReferenceCountingNative()
+ { return BaseClass::isFetchAndAddNative(); }
+ static inline bool isReferenceCountingWaitFree()
+ { return BaseClass::isFetchAndAddWaitFree(); }
+ template <typename T> static inline always_inline
+ bool ref(T &_q_value)
+ {
+ return BaseClass::fetchAndAddRelaxed(_q_value, 1) != T(-1);
+ }
+
+ template <typename T> static inline always_inline
+ bool deref(T &_q_value)
+ {
+ return BaseClass::fetchAndAddRelaxed(_q_value, -1) != 1;
+ }
+
+#if 0
+ // These functions have no default implementation
+ // Archictectures must implement them
+ static inline bool isTestAndSetNative();
+ static inline bool isTestAndSetWaitFree();
+ template <typename T> static inline
+ bool testAndSetRelaxed(T &_q_value, T expectedValue, T newValue);
+#endif
+
+ template <typename T> static inline always_inline
+ bool testAndSetAcquire(T &_q_value, T expectedValue, T newValue)
+ {
+ bool tmp = BaseClass::testAndSetRelaxed(_q_value, expectedValue, newValue);
+ BaseClass::acquireMemoryFence();
+ return tmp;
+ }
+
+ template <typename T> static inline always_inline
+ bool testAndSetRelease(T &_q_value, T expectedValue, T newValue)
+ {
+ BaseClass::releaseMemoryFence();
+ return BaseClass::testAndSetRelaxed(_q_value, expectedValue, newValue);
+ }
+
+ template <typename T> static inline always_inline
+ bool testAndSetOrdered(T &_q_value, T expectedValue, T newValue)
+ {
+ BaseClass::orderedMemoryFence();
+ return BaseClass::testAndSetRelaxed(_q_value, expectedValue, newValue);
+ }
+
+ static inline bool isFetchAndStoreNative() { return false; }
+ static inline bool isFetchAndStoreWaitFree() { return false; }
+
+ template <typename T> static inline always_inline
+ T fetchAndStoreRelaxed(T &_q_value, T newValue)
+ {
+ // implement fetchAndStore on top of testAndSet
+ forever {
+ register T tmp = load(_q_value);
+ if (BaseClass::testAndSetRelaxed(_q_value, tmp, newValue))
+ return tmp;
+ }
+ }
+
+ template <typename T> static inline always_inline
+ T fetchAndStoreAcquire(T &_q_value, T newValue)
+ {
+ T tmp = BaseClass::fetchAndStoreRelaxed(_q_value, newValue);
+ BaseClass::acquireMemoryFence();
+ return tmp;
+ }
+
+ template <typename T> static inline always_inline
+ T fetchAndStoreRelease(T &_q_value, T newValue)
+ {
+ BaseClass::releaseMemoryFence();
+ return BaseClass::fetchAndStoreRelaxed(_q_value, newValue);
+ }
+
+ template <typename T> static inline always_inline
+ T fetchAndStoreOrdered(T &_q_value, T newValue)
+ {
+ BaseClass::orderedMemoryFence();
+ return BaseClass::fetchAndStoreRelaxed(_q_value, newValue);
+ }
+
+ static inline bool isFetchAndAddNative() { return false; }
+ static inline bool isFetchAndAddWaitFree() { return false; }
+ template <typename T> static inline always_inline
+ T fetchAndAddRelaxed(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
+ {
+ // implement fetchAndAdd on top of testAndSet
+ forever {
+ register T tmp = BaseClass::load(_q_value);
+ if (BaseClass::testAndSetRelaxed(_q_value, tmp, T(tmp + valueToAdd)))
+ return tmp;
+ }
+ }
+
+ template <typename T> static inline always_inline
+ T fetchAndAddAcquire(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
+ {
+ T tmp = BaseClass::fetchAndAddRelaxed(_q_value, valueToAdd);
+ BaseClass::acquireMemoryFence();
+ return tmp;
+ }
+
+ template <typename T> static inline always_inline
+ T fetchAndAddRelease(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
+ {
+ BaseClass::releaseMemoryFence();
+ return BaseClass::fetchAndAddRelaxed(_q_value, valueToAdd);
+ }
+
+ template <typename T> static inline always_inline
+ T fetchAndAddOrdered(T &_q_value, typename QAtomicAdditiveType<T>::AdditiveT valueToAdd)
+ {
+ BaseClass::orderedMemoryFence();
+ return BaseClass::fetchAndAddRelaxed(_q_value, valueToAdd);
+ }
+};
+
+#undef always_inline
+
+QT_END_NAMESPACE
+QT_END_HEADER
+
+#endif // QGENERICATOMIC_H