author     Fabian Kosmale <fabian.kosmale@qt.io>    2022-02-04 11:50:45 +0100
committer  Fabian Kosmale <fabian.kosmale@qt.io>    2022-02-05 07:26:55 +0100
commit     e400ba64a39703a87bbdbd213504bd384997a5a2
tree       85241dafdbddb77743fcf44de91e85599cda47f2 /src/corelib/thread/qatomic_cxx11.h
parent     4d8cbf9863e5d6fffccc2c0a4adbd279fd4bae89
Use acq_rel ordering semantics in qatomic_cxx11.h ref/deref
Change ref() and deref() in qatomic_cxx11.h to use acq_rel ordering semantics, as documented, instead of the stricter sequential consistency that was previously used implicitly.

It was pointed out in the discussion on atomic reference counts that acq_rel semantics should be sufficient for reference counting. It also turned out that this is all that the definition of "Ordered" in the documentation really guarantees.

Original-patch-by: Kevin Kofler <kevin.kofler@chello.at>
Change-Id: I6cd50b34bdc699ff8b7c8fe6d303b28d71494731
Reviewed-by: Oswald Buddenhagen <oswald.buddenhagen@gmx.de>
Reviewed-by: Thiago Macieira <thiago.macieira@intel.com>
Reviewed-by: Marc Mutz <marc.mutz@qt.io>
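For illustration, here is a minimal standalone sketch of the pattern this patch adopts, outside of Qt: a shared object whose reference count is maintained with acquire-release atomics. The names Shared, acquire() and release() are hypothetical, not Qt API.

#include <atomic>

struct Shared {
    std::atomic<int> refs{1}; // starts with one reference, held by the creator
};

static void acquire(Shared *s) noexcept
{
    // Incrementing a live refcount needs no stronger ordering than
    // acq_rel; ++s->refs would imply sequential consistency, which is
    // stricter than required here.
    s->refs.fetch_add(1, std::memory_order_acq_rel);
}

static void release(Shared *s) noexcept
{
    // fetch_sub returns the old value: if it was 1, this call dropped
    // the last reference, and the acq_rel ordering ensures this thread
    // observes all prior writes to *s before deleting it.
    if (s->refs.fetch_sub(1, std::memory_order_acq_rel) == 1)
        delete s;
}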
Diffstat (limited to 'src/corelib/thread/qatomic_cxx11.h')
 src/corelib/thread/qatomic_cxx11.h | 18 ++++++++++++++++--
 1 file changed, 16 insertions(+), 2 deletions(-)
diff --git a/src/corelib/thread/qatomic_cxx11.h b/src/corelib/thread/qatomic_cxx11.h
index bf487bbf1f..0d4817249e 100644
--- a/src/corelib/thread/qatomic_cxx11.h
+++ b/src/corelib/thread/qatomic_cxx11.h
@@ -278,13 +278,27 @@ template <typename X> struct QAtomicOps
template <typename T>
static inline bool ref(std::atomic<T> &_q_value)
{
- return ++_q_value != 0;
+ /* Conceptually, we want to
+ * return ++_q_value != 0;
+ * However, that would be sequentially consistent, and thus stronger
+ * than what we need. Based on
+ * http://eel.is/c++draft/atomics.types.memop#6, we know that
+ * pre-increment is equivalent to fetch_add(1) + 1. Unlike
+ * pre-increment, fetch_add takes a memory order argument, so we can get
+ * the desired acquire-release semantics.
+ * One last gotcha is that fetch_add(1) + 1 would need to be converted
+ * back to T, because it's susceptible to integer promotion. To sidestep
+ * this issue and to avoid UB on signed overflow, we rewrite the
+ * expression to:
+ */
+ return _q_value.fetch_add(1, std::memory_order_acq_rel) != T(-1);
}
template <typename T>
static inline bool deref(std::atomic<T> &_q_value) noexcept
{
- return --_q_value != 0;
+ // compare with ref
+ return _q_value.fetch_sub(1, std::memory_order_acq_rel) != T(1);
}
static inline bool isTestAndSetNative() noexcept
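To make the integer-promotion gotcha from the comment above concrete, consider a narrow type such as unsigned char. The check below is a hypothetical standalone example, not part of the patch: fetch_add() returns the old value as T, but adding 1 to it promotes the result to int, so a wrap-around to zero goes undetected unless the sum is converted back to T. Comparing the old value against T(-1) avoids the conversion entirely.

#include <atomic>
#include <cassert>

int main()
{
    using T = unsigned char;

    // T(-1) is 255, the maximum value of T; incrementing wraps the
    // stored count to 0.
    std::atomic<T> a{T(-1)};
    // Old value 255, promoted sum 256 (an int): the naive rewrite
    // reports a nonzero count even though the value wrapped to 0.
    assert(a.fetch_add(1, std::memory_order_acq_rel) + 1 != 0);

    // Comparing the returned old value against T(-1) matches the
    // semantics of ++a != 0 without any promotion.
    std::atomic<T> b{T(-1)};
    assert(!(b.fetch_add(1, std::memory_order_acq_rel) != T(-1)));
    return 0;
}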