path: root/src/corelib/arch
author     Qt by Nokia <qt-info@nokia.com>  2011-04-27 12:05:43 +0200
committer  axis <qt-info@nokia.com>         2011-04-27 12:05:43 +0200
commit     38be0d13830efd2d98281c645c3a60afe05ffece (patch)
tree       6ea73f3ec77f7d153333779883e8120f82820abe /src/corelib/arch
Initial import from the monolithic Qt.
This is the beginning of revision history for this module. If you want
to look at revision history older than this, please refer to the Qt Git
wiki for how to use Git history grafting. At the time of writing, this
wiki is located here:

http://qt.gitorious.org/qt/pages/GitIntroductionWithQt

If you have already performed the grafting and you don't see any history
beyond this commit, try running "git log" with the "--follow" argument.

Branched from the monolithic repo, Qt master branch, at commit
896db169ea224deb96c59ce8af800d019de63f12
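As a rough, illustrative sketch of what that grafting amounts to (the
history remote URL below is a placeholder, and "git replace --graft" is
just one modern way of stitching the two histories together, not
necessarily the exact procedure the wiki describes):

    # Illustrative only: fetch the old monolithic history, then tell Git to
    # treat the monolithic branch point as the parent of this import commit.
    git remote add qt-history <monolithic-qt-repository-url>
    git fetch qt-history
    git replace --graft 38be0d13830efd2d98281c645c3a60afe05ffece \
                        896db169ea224deb96c59ce8af800d019de63f12

    # With the graft in place, per-file history can be followed across the
    # import boundary, e.g.:
    git log --follow -- src/corelib/arch/arch.pri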
Diffstat (limited to 'src/corelib/arch')
-rw-r--r--  src/corelib/arch/alpha/arch.pri                        |    4
-rw-r--r--  src/corelib/arch/alpha/qatomic_alpha.s                 |  239
-rw-r--r--  src/corelib/arch/arch.pri                              |   38
-rw-r--r--  src/corelib/arch/arm/arch.pri                          |    4
-rw-r--r--  src/corelib/arch/arm/qatomic_arm.cpp                   |   72
-rw-r--r--  src/corelib/arch/avr32/arch.pri                        |    3
-rw-r--r--  src/corelib/arch/bfin/arch.pri                         |    3
-rw-r--r--  src/corelib/arch/generic/arch.pri                      |    6
-rw-r--r--  src/corelib/arch/generic/qatomic_generic_unix.cpp      |  123
-rw-r--r--  src/corelib/arch/generic/qatomic_generic_windows.cpp   |  131
-rw-r--r--  src/corelib/arch/i386/arch.pri                         |    4
-rw-r--r--  src/corelib/arch/i386/qatomic_i386.s                   |  103
-rw-r--r--  src/corelib/arch/ia64/arch.pri                         |    4
-rw-r--r--  src/corelib/arch/ia64/qatomic_ia64.s                   |   74
-rw-r--r--  src/corelib/arch/integrity/arch.pri                    |    3
-rw-r--r--  src/corelib/arch/macosx/arch.pri                       |    6
-rw-r--r--  src/corelib/arch/macosx/qatomic32_ppc.s                |  169
-rw-r--r--  src/corelib/arch/mips/arch.pri                         |    8
-rw-r--r--  src/corelib/arch/mips/qatomic_mips32.s                 |  150
-rw-r--r--  src/corelib/arch/mips/qatomic_mips64.s                 |  138
-rw-r--r--  src/corelib/arch/parisc/arch.pri                       |    5
-rw-r--r--  src/corelib/arch/parisc/q_ldcw.s                       |   62
-rw-r--r--  src/corelib/arch/parisc/qatomic_parisc.cpp             |   88
-rw-r--r--  src/corelib/arch/powerpc/arch.pri                      |   10
-rw-r--r--  src/corelib/arch/powerpc/qatomic32.s                   |  525
-rw-r--r--  src/corelib/arch/powerpc/qatomic64.s                   |  533
-rw-r--r--  src/corelib/arch/qatomic_alpha.h                       |  642
-rw-r--r--  src/corelib/arch/qatomic_arch.h                        |  101
-rw-r--r--  src/corelib/arch/qatomic_arm.h                         |   76
-rw-r--r--  src/corelib/arch/qatomic_armv5.h                       |  431
-rw-r--r--  src/corelib/arch/qatomic_armv6.h                       |  559
-rw-r--r--  src/corelib/arch/qatomic_armv7.h                       |   61
-rw-r--r--  src/corelib/arch/qatomic_avr32.h                       |  252
-rw-r--r--  src/corelib/arch/qatomic_bfin.h                        |  343
-rw-r--r--  src/corelib/arch/qatomic_bootstrap.h                   |   99
-rw-r--r--  src/corelib/arch/qatomic_generic.h                     |  282
-rw-r--r--  src/corelib/arch/qatomic_i386.h                        |  361
-rw-r--r--  src/corelib/arch/qatomic_ia64.h                        |  813
-rw-r--r--  src/corelib/arch/qatomic_integrity.h                   |  289
-rw-r--r--  src/corelib/arch/qatomic_macosx.h                      |   57
-rw-r--r--  src/corelib/arch/qatomic_mips.h                        |  892
-rw-r--r--  src/corelib/arch/qatomic_parisc.h                      |  305
-rw-r--r--  src/corelib/arch/qatomic_powerpc.h                     |  648
-rw-r--r--  src/corelib/arch/qatomic_s390.h                        |  426
-rw-r--r--  src/corelib/arch/qatomic_sh.h                          |  330
-rw-r--r--  src/corelib/arch/qatomic_sh4a.h                        |  537
-rw-r--r--  src/corelib/arch/qatomic_sparc.h                       |  525
-rw-r--r--  src/corelib/arch/qatomic_symbian.h                     |  313
-rw-r--r--  src/corelib/arch/qatomic_vxworks.h                     |  318
-rw-r--r--  src/corelib/arch/qatomic_windows.h                     |  496
-rw-r--r--  src/corelib/arch/qatomic_windowsce.h                   |   56
-rw-r--r--  src/corelib/arch/qatomic_x86_64.h                      |  363
-rw-r--r--  src/corelib/arch/s390/arch.pri                         |    3
-rw-r--r--  src/corelib/arch/sh/arch.pri                           |    4
-rw-r--r--  src/corelib/arch/sh/qatomic_sh.cpp                     |   72
-rw-r--r--  src/corelib/arch/sh4a/arch.pri                         |    3
-rw-r--r--  src/corelib/arch/sparc/arch.pri                        |   10
-rw-r--r--  src/corelib/arch/sparc/qatomic32.s                     |  103
-rw-r--r--  src/corelib/arch/sparc/qatomic64.s                     |  327
-rw-r--r--  src/corelib/arch/sparc/qatomic_sparc.cpp               |   92
-rw-r--r--  src/corelib/arch/symbian/arch.pri                      |   18
-rw-r--r--  src/corelib/arch/symbian/common_p.h                    |  106
-rw-r--r--  src/corelib/arch/symbian/debugfunction.cpp             | 1143
-rw-r--r--  src/corelib/arch/symbian/dla_p.h                       |  969
-rw-r--r--  src/corelib/arch/symbian/heap_hybrid.cpp               | 3346
-rw-r--r--  src/corelib/arch/symbian/heap_hybrid_p.h               |  406
-rw-r--r--  src/corelib/arch/symbian/page_alloc_p.h                |   68
-rw-r--r--  src/corelib/arch/symbian/qatomic_generic_armv6.cpp     |  472
-rw-r--r--  src/corelib/arch/symbian/qatomic_symbian.cpp           |  527
-rw-r--r--  src/corelib/arch/symbian/qt_heapsetup_symbian.cpp      |  105
-rw-r--r--  src/corelib/arch/symbian/qt_hybridheap_symbian_p.h     |  178
-rw-r--r--  src/corelib/arch/symbian/slab_p.h                      |  125
-rw-r--r--  src/corelib/arch/vxworks/arch.pri                      |    6
-rw-r--r--  src/corelib/arch/vxworks/qatomic_ppc.s                 |  415
-rw-r--r--  src/corelib/arch/windows/arch.pri                      |    3
-rw-r--r--  src/corelib/arch/x86_64/arch.pri                       |    4
-rw-r--r--  src/corelib/arch/x86_64/qatomic_sun.s                  |   91
77 files changed, 20676 insertions, 0 deletions
diff --git a/src/corelib/arch/alpha/arch.pri b/src/corelib/arch/alpha/arch.pri
new file mode 100644
index 0000000000..448a531f07
--- /dev/null
+++ b/src/corelib/arch/alpha/arch.pri
@@ -0,0 +1,4 @@
+#
+# Alpha architecture
+#
+!*-g++*:SOURCES += $$QT_ARCH_CPP/qatomic_alpha.s
diff --git a/src/corelib/arch/alpha/qatomic_alpha.s b/src/corelib/arch/alpha/qatomic_alpha.s
new file mode 100644
index 0000000000..d225de794b
--- /dev/null
+++ b/src/corelib/arch/alpha/qatomic_alpha.s
@@ -0,0 +1,239 @@
+;/****************************************************************************
+;**
+;** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+;** All rights reserved.
+;** Contact: Nokia Corporation (qt-info@nokia.com)
+;**
+;** This file is part of the QtGui module of the Qt Toolkit.
+;**
+;** $QT_BEGIN_LICENSE:LGPL$
+;** No Commercial Usage
+;** This file contains pre-release code and may not be distributed.
+;** You may use this file in accordance with the terms and conditions
+;** contained in the Technology Preview License Agreement accompanying
+;** this package.
+;**
+;** GNU Lesser General Public License Usage
+;** Alternatively, this file may be used under the terms of the GNU Lesser
+;** General Public License version 2.1 as published by the Free Software
+;** Foundation and appearing in the file LICENSE.LGPL included in the
+;** packaging of this file. Please review the following information to
+;** ensure the GNU Lesser General Public License version 2.1 requirements
+;** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+;**
+;** In addition, as a special exception, Nokia gives you certain additional
+;** rights. These rights are described in the Nokia Qt LGPL Exception
+;** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+;**
+;** If you have questions regarding the use of this file, please contact
+;** Nokia at qt-info@nokia.com.
+;**
+;**
+;**
+;**
+;**
+;**
+;**
+;**
+;** $QT_END_LICENSE$
+;**
+;****************************************************************************/
+ .set noreorder
+ .set volatile
+ .set noat
+ .arch ev4
+ .text
+ .align 2
+ .align 4
+ .globl q_atomic_test_and_set_int
+ .ent q_atomic_test_and_set_int
+q_atomic_test_and_set_int:
+ .frame $30,0,$26,0
+ .prologue 0
+1: ldl_l $0,0($16)
+ cmpeq $0,$17,$0
+ beq $0,3f
+ mov $18,$0
+ stl_c $0,0($16)
+ beq $0,2f
+ br 3f
+2: br 1b
+3: addl $31,$0,$0
+ ret $31,($26),1
+ .end q_atomic_test_and_set_int
+ .align 2
+ .align 4
+ .globl q_atomic_test_and_set_acquire_int
+ .ent q_atomic_test_and_set_acquire_int
+q_atomic_test_and_set_acquire_int:
+ .frame $30,0,$26,0
+ .prologue 0
+1: ldl_l $0,0($16)
+ cmpeq $0,$17,$0
+ beq $0,3f
+ mov $18,$0
+ stl_c $0,0($16)
+ beq $0,2f
+ br 3f
+2: br 1b
+3: mb
+ addl $31,$0,$0
+ ret $31,($26),1
+ .end q_atomic_test_and_set_acquire_int
+ .align 2
+ .align 4
+ .globl q_atomic_test_and_set_release_int
+ .ent q_atomic_test_and_set_release_int
+q_atomic_test_and_set_release_int:
+ .frame $30,0,$26,0
+ .prologue 0
+ mb
+1: ldl_l $0,0($16)
+ cmpeq $0,$17,$0
+ beq $0,3f
+ mov $18,$0
+ stl_c $0,0($16)
+ beq $0,2f
+ br 3f
+2: br 1b
+3: addl $31,$0,$0
+ ret $31,($26),1
+ .end q_atomic_test_and_set_release_int
+ .align 2
+ .align 4
+ .globl q_atomic_test_and_set_ptr
+ .ent q_atomic_test_and_set_ptr
+q_atomic_test_and_set_ptr:
+ .frame $30,0,$26,0
+ .prologue 0
+1: ldq_l $0,0($16)
+ cmpeq $0,$17,$0
+ beq $0,3f
+ mov $18,$0
+ stq_c $0,0($16)
+ beq $0,2f
+ br 3f
+2: br 1b
+3: addl $31,$0,$0
+ ret $31,($26),1
+ .end q_atomic_test_and_set_ptr
+ .align 2
+ .align 4
+ .globl q_atomic_increment
+ .ent q_atomic_increment
+q_atomic_increment:
+ .frame $30,0,$26,0
+ .prologue 0
+1: ldl_l $0,0($16)
+ addl $0,1,$1
+ stl_c $1,0($16)
+ beq $1,2f
+ br 3f
+2: br 1b
+3: addl $31,$0,$0
+ cmpeq $0,$1,$0
+ xor $0,1,$0
+ ret $31,($26),1
+ .end q_atomic_increment
+ .align 2
+ .align 4
+ .globl q_atomic_decrement
+ .ent q_atomic_decrement
+q_atomic_decrement:
+ .frame $30,0,$26,0
+ .prologue 0
+1: ldl_l $0,0($16)
+ subl $0,1,$1
+ stl_c $1,0($16)
+ beq $1,2f
+ br 3f
+2: br 1b
+3: addl $31,$0,$0
+ cmpeq $0,1,$0
+ xor $0,1,$0
+ ret $31,($26),1
+ .end q_atomic_decrement
+ .align 2
+ .align 4
+ .globl q_atomic_set_int
+ .ent q_atomic_set_int
+q_atomic_set_int:
+ .frame $30,0,$26,0
+ .prologue 0
+1: ldl_l $0,0($16)
+ mov $17,$1
+ stl_c $1,0($16)
+ beq $1,2f
+ br 3f
+2: br 1b
+3: addl $31,$0,$0
+ ret $31,($26),1
+ .end q_atomic_set_int
+ .align 2
+ .align 4
+ .globl q_atomic_set_ptr
+ .ent q_atomic_set_ptr
+q_atomic_set_ptr:
+ .frame $30,0,$26,0
+ .prologue 0
+1: ldq_l $0,0($16)
+ mov $17,$1
+ stq_c $1,0($16)
+ beq $1,2f
+ br 3f
+2: br 1b
+3: ret $31,($26),1
+ .end q_atomic_set_ptr
+
+ .align 2
+ .align 4
+ .globl q_atomic_fetch_and_add_int
+ .ent q_atomic_fetch_and_add_int
+q_atomic_fetch_and_add_int:
+ .frame $30,0,$26,0
+ .prologue 0
+1: ldl_l $0,0($16)
+ addl $0,$17,$1
+ stl_c $1,0($16)
+ beq $1,2f
+ br 3f
+2: br 1b
+3: addl $31,$0,$0
+ ret $31,($26),1
+ .end q_atomic_fetch_and_add_int
+
+ .align 2
+ .align 4
+ .globl q_atomic_fetch_and_add_acquire_int
+ .ent q_atomic_fetch_and_add_acquire_int
+q_atomic_fetch_and_add_acquire_int:
+ .frame $30,0,$26,0
+ .prologue 0
+1: ldl_l $0,0($16)
+ addl $0,$17,$1
+ stl_c $1,0($16)
+ beq $1,2f
+ br 3f
+2: br 1b
+3: mb
+ addl $31,$0,$0
+ ret $31,($26),1
+ .end q_atomic_fetch_and_add_acquire_int
+
+ .align 2
+ .align 4
+ .globl q_atomic_fetch_and_add_release_int
+ .ent q_atomic_fetch_and_add_release_int
+q_atomic_fetch_and_add_release_int:
+ .frame $30,0,$26,0
+ .prologue 0
+ mb
+1: ldl_l $0,0($16)
+ addl $0,$17,$1
+ stl_c $1,0($16)
+ beq $1,2f
+ br 3f
+2: br 1b
+3: addl $31,$0,$0
+ ret $31,($26),1
+ .end q_atomic_fetch_and_add_release_int
diff --git a/src/corelib/arch/arch.pri b/src/corelib/arch/arch.pri
new file mode 100644
index 0000000000..cd23e5e855
--- /dev/null
+++ b/src/corelib/arch/arch.pri
@@ -0,0 +1,38 @@
+win32:HEADERS += arch/qatomic_windows.h \
+ arch/qatomic_generic.h
+
+win32-g++*:HEADERS += arch/qatomic_i386.h \
+ arch/qatomic_x86_64.h
+
+mac:HEADERS += arch/qatomic_macosx.h \
+ arch/qatomic_generic.h
+
+symbian:HEADERS += arch/qatomic_symbian.h \
+ arch/qatomic_generic.h
+
+vxworks:HEADERS += arch/qatomic_vxworks.h
+
+integrity:HEADERS += arch/qatomic_integrity.h
+
+!wince*:!win32:!mac:!symbian:HEADERS += arch/qatomic_alpha.h \
+ arch/qatomic_avr32.h \
+ arch/qatomic_ia64.h \
+ arch/qatomic_parisc.h \
+ arch/qatomic_sparc.h \
+ arch/qatomic_arch.h \
+ arch/qatomic_generic.h \
+ arch/qatomic_powerpc.h \
+ arch/qatomic_arm.h \
+ arch/qatomic_armv5.h \
+ arch/qatomic_armv6.h \
+ arch/qatomic_armv7.h \
+ arch/qatomic_i386.h \
+ arch/qatomic_mips.h \
+ arch/qatomic_s390.h \
+ arch/qatomic_x86_64.h \
+ arch/qatomic_sh.h \
+ arch/qatomic_sh4a.h
+
+QT_ARCH_CPP = $$QT_SOURCE_TREE/src/corelib/arch/$$QT_ARCH
+DEPENDPATH += $$QT_ARCH_CPP
+include($$QT_ARCH_CPP/arch.pri, "", true)
diff --git a/src/corelib/arch/arm/arch.pri b/src/corelib/arch/arm/arch.pri
new file mode 100644
index 0000000000..79e4bfc115
--- /dev/null
+++ b/src/corelib/arch/arm/arch.pri
@@ -0,0 +1,4 @@
+#
+# ARM architecture
+#
+SOURCES += $$QT_ARCH_CPP/qatomic_arm.cpp
diff --git a/src/corelib/arch/arm/qatomic_arm.cpp b/src/corelib/arch/arm/qatomic_arm.cpp
new file mode 100644
index 0000000000..8f06515247
--- /dev/null
+++ b/src/corelib/arch/arm/qatomic_arm.cpp
@@ -0,0 +1,72 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include <QtCore/qglobal.h>
+
+#include <unistd.h>
+#ifdef _POSIX_PRIORITY_SCHEDULING
+# include <sched.h>
+#endif
+#include <time.h>
+
+QT_BEGIN_NAMESPACE
+
+QT_USE_NAMESPACE
+
+Q_CORE_EXPORT char q_atomic_lock = 0;
+
+Q_CORE_EXPORT void qt_atomic_yield(int *count)
+{
+#ifdef _POSIX_PRIORITY_SCHEDULING
+ if((*count)++ < 50) {
+ sched_yield();
+ } else
+#endif
+ {
+ struct timespec tm;
+ tm.tv_sec = 0;
+ tm.tv_nsec = 2000001;
+ nanosleep(&tm, NULL);
+ *count = 0;
+ }
+}
+
+QT_END_NAMESPACE
diff --git a/src/corelib/arch/avr32/arch.pri b/src/corelib/arch/avr32/arch.pri
new file mode 100644
index 0000000000..37f231ee61
--- /dev/null
+++ b/src/corelib/arch/avr32/arch.pri
@@ -0,0 +1,3 @@
+#
+# AVR32 architecture
+#
diff --git a/src/corelib/arch/bfin/arch.pri b/src/corelib/arch/bfin/arch.pri
new file mode 100644
index 0000000000..fa198ae8bb
--- /dev/null
+++ b/src/corelib/arch/bfin/arch.pri
@@ -0,0 +1,3 @@
+#
+# Blackfin architecture
+#
diff --git a/src/corelib/arch/generic/arch.pri b/src/corelib/arch/generic/arch.pri
new file mode 100644
index 0000000000..8fee63fa5d
--- /dev/null
+++ b/src/corelib/arch/generic/arch.pri
@@ -0,0 +1,6 @@
+#
+# 'generic' architecture
+#
+
+unix:SOURCES += qatomic_generic_unix.cpp
+win32:SOURCES += qatomic_generic_windows.cpp
diff --git a/src/corelib/arch/generic/qatomic_generic_unix.cpp b/src/corelib/arch/generic/qatomic_generic_unix.cpp
new file mode 100644
index 0000000000..5ab4c6b943
--- /dev/null
+++ b/src/corelib/arch/generic/qatomic_generic_unix.cpp
@@ -0,0 +1,123 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#if !defined(Q_OS_SYMBIAN) || (defined(Q_OS_SYMBIAN) && !defined(Q_CC_RVCT))
+
+#include "qplatformdefs.h"
+
+#include <QtCore/qatomic.h>
+
+QT_BEGIN_NAMESPACE
+static pthread_mutex_t qAtomicMutex = PTHREAD_MUTEX_INITIALIZER;
+
+Q_CORE_EXPORT
+bool QBasicAtomicInt_testAndSetOrdered(volatile int *_q_value, int expectedValue, int newValue)
+{
+ bool returnValue = false;
+ pthread_mutex_lock(&qAtomicMutex);
+ if (*_q_value == expectedValue) {
+ *_q_value = newValue;
+ returnValue = true;
+ }
+ pthread_mutex_unlock(&qAtomicMutex);
+ return returnValue;
+}
+
+Q_CORE_EXPORT
+int QBasicAtomicInt_fetchAndStoreOrdered(volatile int *_q_value, int newValue)
+{
+ int returnValue;
+ pthread_mutex_lock(&qAtomicMutex);
+ returnValue = *_q_value;
+ *_q_value = newValue;
+ pthread_mutex_unlock(&qAtomicMutex);
+ return returnValue;
+}
+
+Q_CORE_EXPORT
+int QBasicAtomicInt_fetchAndAddOrdered(volatile int *_q_value, int valueToAdd)
+{
+ int returnValue;
+ pthread_mutex_lock(&qAtomicMutex);
+ returnValue = *_q_value;
+ *_q_value += valueToAdd;
+ pthread_mutex_unlock(&qAtomicMutex);
+ return returnValue;
+}
+
+Q_CORE_EXPORT
+bool QBasicAtomicPointer_testAndSetOrdered(void * volatile *_q_value,
+ void *expectedValue,
+ void *newValue)
+{
+ bool returnValue = false;
+ pthread_mutex_lock(&qAtomicMutex);
+ if (*_q_value == expectedValue) {
+ *_q_value = newValue;
+ returnValue = true;
+ }
+ pthread_mutex_unlock(&qAtomicMutex);
+ return returnValue;
+}
+
+Q_CORE_EXPORT
+void *QBasicAtomicPointer_fetchAndStoreOrdered(void * volatile *_q_value, void *newValue)
+{
+ void *returnValue;
+ pthread_mutex_lock(&qAtomicMutex);
+ returnValue = *_q_value;
+ *_q_value = newValue;
+ pthread_mutex_unlock(&qAtomicMutex);
+ return returnValue;
+}
+
+Q_CORE_EXPORT
+void *QBasicAtomicPointer_fetchAndAddOrdered(void * volatile *_q_value, qptrdiff valueToAdd)
+{
+ void *returnValue;
+ pthread_mutex_lock(&qAtomicMutex);
+ returnValue = *_q_value;
+ *_q_value = reinterpret_cast<char *>(returnValue) + valueToAdd;
+ pthread_mutex_unlock(&qAtomicMutex);
+ return returnValue;
+}
+QT_END_NAMESPACE
+#endif //!defined(Q_OS_SYMBIAN) && !defined(Q_CC_RVCT)
diff --git a/src/corelib/arch/generic/qatomic_generic_windows.cpp b/src/corelib/arch/generic/qatomic_generic_windows.cpp
new file mode 100644
index 0000000000..c2496db45c
--- /dev/null
+++ b/src/corelib/arch/generic/qatomic_generic_windows.cpp
@@ -0,0 +1,131 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include "qplatformdefs.h"
+
+#include <QtCore/qatomic.h>
+
+
+class QCriticalSection
+{
+public:
+ QCriticalSection() { InitializeCriticalSection(&section); }
+ ~QCriticalSection() { DeleteCriticalSection(&section); }
+ void lock() { EnterCriticalSection(&section); }
+ void unlock() { LeaveCriticalSection(&section); }
+
+private:
+ CRITICAL_SECTION section;
+};
+
+static QCriticalSection qAtomicCriticalSection;
+
+Q_CORE_EXPORT
+bool QBasicAtomicInt_testAndSetOrdered(volatile int *_q_value, int expectedValue, int newValue)
+{
+ bool returnValue = false;
+ qAtomicCriticalSection.lock();
+ if (*_q_value == expectedValue) {
+ *_q_value = newValue;
+ returnValue = true;
+ }
+ qAtomicCriticalSection.unlock();
+ return returnValue;
+}
+
+Q_CORE_EXPORT
+int QBasicAtomicInt_fetchAndStoreOrdered(volatile int *_q_value, int newValue)
+{
+ int returnValue;
+ qAtomicCriticalSection.lock();
+ returnValue = *_q_value;
+ *_q_value = newValue;
+ qAtomicCriticalSection.unlock();
+ return returnValue;
+}
+
+Q_CORE_EXPORT
+int QBasicAtomicInt_fetchAndAddOrdered(volatile int *_q_value, int valueToAdd)
+{
+ int returnValue;
+ qAtomicCriticalSection.lock();
+ returnValue = *_q_value;
+ *_q_value += valueToAdd;
+ qAtomicCriticalSection.unlock();
+ return returnValue;
+}
+
+Q_CORE_EXPORT
+bool QBasicAtomicPointer_testAndSetOrdered(void * volatile *_q_value,
+ void *expectedValue,
+ void *newValue)
+{
+ bool returnValue = false;
+ qAtomicCriticalSection.lock();
+ if (*_q_value == expectedValue) {
+ *_q_value = newValue;
+ returnValue = true;
+ }
+ qAtomicCriticalSection.unlock();
+ return returnValue;
+}
+
+Q_CORE_EXPORT
+void *QBasicAtomicPointer_fetchAndStoreOrdered(void * volatile *_q_value, void *newValue)
+{
+ void *returnValue;
+ qAtomicCriticalSection.lock();
+ returnValue = *_q_value;
+ *_q_value = newValue;
+ qAtomicCriticalSection.unlock();
+ return returnValue;
+}
+
+Q_CORE_EXPORT
+void *QBasicAtomicPointer_fetchAndAddOrdered(void * volatile *_q_value, qptrdiff valueToAdd)
+{
+ void *returnValue;
+ qAtomicCriticalSection.lock();
+ returnValue = *_q_value;
+ *_q_value = reinterpret_cast<char *>(returnValue) + valueToAdd;
+ qAtomicCriticalSection.unlock();
+ return returnValue;
+}
diff --git a/src/corelib/arch/i386/arch.pri b/src/corelib/arch/i386/arch.pri
new file mode 100644
index 0000000000..3101dae01b
--- /dev/null
+++ b/src/corelib/arch/i386/arch.pri
@@ -0,0 +1,4 @@
+#
+# i386 architecture
+#
+!*-g++*:!*-icc*:SOURCES += $$QT_ARCH_CPP/qatomic_i386.s
diff --git a/src/corelib/arch/i386/qatomic_i386.s b/src/corelib/arch/i386/qatomic_i386.s
new file mode 100644
index 0000000000..08158f926b
--- /dev/null
+++ b/src/corelib/arch/i386/qatomic_i386.s
@@ -0,0 +1,103 @@
+ .text
+
+ .align 4,0x90
+ .globl q_atomic_test_and_set_int
+q_atomic_test_and_set_int:
+ movl 4(%esp),%ecx
+ movl 8(%esp),%eax
+ movl 12(%esp),%edx
+ lock
+ cmpxchgl %edx,(%ecx)
+ mov $0,%eax
+ sete %al
+ ret
+ .align 4,0x90
+ .type q_atomic_test_and_set_int,@function
+ .size q_atomic_test_and_set_int,.-q_atomic_test_and_set_int
+
+ .align 4,0x90
+ .globl q_atomic_test_and_set_ptr
+q_atomic_test_and_set_ptr:
+ movl 4(%esp),%ecx
+ movl 8(%esp),%eax
+ movl 12(%esp),%edx
+ lock
+ cmpxchgl %edx,(%ecx)
+ mov $0,%eax
+ sete %al
+ ret
+ .align 4,0x90
+ .type q_atomic_test_and_set_ptr,@function
+ .size q_atomic_test_and_set_ptr,.-q_atomic_test_and_set_ptr
+
+ .align 4,0x90
+ .globl q_atomic_increment
+q_atomic_increment:
+ movl 4(%esp), %ecx
+ lock
+ incl (%ecx)
+ mov $0,%eax
+ setne %al
+ ret
+ .align 4,0x90
+ .type q_atomic_increment,@function
+ .size q_atomic_increment,.-q_atomic_increment
+
+ .align 4,0x90
+ .globl q_atomic_decrement
+q_atomic_decrement:
+ movl 4(%esp), %ecx
+ lock
+ decl (%ecx)
+ mov $0,%eax
+ setne %al
+ ret
+ .align 4,0x90
+ .type q_atomic_decrement,@function
+ .size q_atomic_decrement,.-q_atomic_decrement
+
+ .align 4,0x90
+ .globl q_atomic_set_int
+q_atomic_set_int:
+ mov 4(%esp),%ecx
+ mov 8(%esp),%eax
+ xchgl %eax,(%ecx)
+ ret
+ .align 4,0x90
+ .type q_atomic_set_int,@function
+ .size q_atomic_set_int,.-q_atomic_set_int
+
+ .align 4,0x90
+ .globl q_atomic_set_ptr
+q_atomic_set_ptr:
+ mov 4(%esp),%ecx
+ mov 8(%esp),%eax
+ xchgl %eax,(%ecx)
+ ret
+ .align 4,0x90
+ .type q_atomic_set_ptr,@function
+ .size q_atomic_set_ptr,.-q_atomic_set_ptr
+
+ .align 4,0x90
+ .globl q_atomic_fetch_and_add_int
+q_atomic_fetch_and_add_int:
+ mov 4(%esp),%ecx
+ mov 8(%esp),%eax
+ lock
+ xadd %eax,(%ecx)
+ ret
+ .align 4,0x90
+ .type q_atomic_fetch_and_add_int,@function
+ .size q_atomic_fetch_and_add_int,.-q_atomic_fetch_and_add_int
+
+ .align 4,0x90
+ .globl q_atomic_fetch_and_add_ptr
+q_atomic_fetch_and_add_ptr:
+ mov 4(%esp),%ecx
+ mov 8(%esp),%eax
+ lock
+ xadd %eax,(%ecx)
+ ret
+ .align 4,0x90
+ .type q_atomic_fetch_and_add_ptr,@function
+ .size q_atomic_fetch_and_add_ptr,.-q_atomic_fetch_and_add_ptr
diff --git a/src/corelib/arch/ia64/arch.pri b/src/corelib/arch/ia64/arch.pri
new file mode 100644
index 0000000000..63afa967a4
--- /dev/null
+++ b/src/corelib/arch/ia64/arch.pri
@@ -0,0 +1,4 @@
+#
+# Intel Itanium architecture
+#
+!*-g++:!*-icc:!hpuxi-acc-*:SOURCES += $$QT_ARCH_CPP/qatomic_ia64.s
diff --git a/src/corelib/arch/ia64/qatomic_ia64.s b/src/corelib/arch/ia64/qatomic_ia64.s
new file mode 100644
index 0000000000..c988cfcd10
--- /dev/null
+++ b/src/corelib/arch/ia64/qatomic_ia64.s
@@ -0,0 +1,74 @@
+;/****************************************************************************
+;**
+;** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+;** All rights reserved.
+;** Contact: Nokia Corporation (qt-info@nokia.com)
+;**
+;** This file is part of the QtGui module of the Qt Toolkit.
+;**
+;** $QT_BEGIN_LICENSE:LGPL$
+;** No Commercial Usage
+;** This file contains pre-release code and may not be distributed.
+;** You may use this file in accordance with the terms and conditions
+;** contained in the Technology Preview License Agreement accompanying
+;** this package.
+;**
+;** GNU Lesser General Public License Usage
+;** Alternatively, this file may be used under the terms of the GNU Lesser
+;** General Public License version 2.1 as published by the Free Software
+;** Foundation and appearing in the file LICENSE.LGPL included in the
+;** packaging of this file. Please review the following information to
+;** ensure the GNU Lesser General Public License version 2.1 requirements
+;** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+;**
+;** In addition, as a special exception, Nokia gives you certain additional
+;** rights. These rights are described in the Nokia Qt LGPL Exception
+;** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+;**
+;** If you have questions regarding the use of this file, please contact
+;** Nokia at qt-info@nokia.com.
+;**
+;**
+;**
+;**
+;**
+;**
+;**
+;**
+;** $QT_END_LICENSE$
+;**
+;****************************************************************************/
+ .pred.safe_across_calls p1-p5,p16-p63
+.text
+ .align 16
+ .global q_atomic_test_and_set_int#
+ .proc q_atomic_test_and_set_int#
+q_atomic_test_and_set_int:
+ .prologue
+ .body
+ mov ar.ccv=r33
+ ;;
+ cmpxchg4.acq r34=[r32],r34,ar.ccv
+ ;;
+ cmp4.eq p6, p7 = r33, r34
+ ;;
+ (p6) addl r8 = 1, r0
+ (p7) mov r8 = r0
+ br.ret.sptk.many b0
+ .endp q_atomic_test_and_set_int#
+ .align 16
+ .global q_atomic_test_and_set_ptr#
+ .proc q_atomic_test_and_set_ptr#
+q_atomic_test_and_set_ptr:
+ .prologue
+ .body
+ mov ar.ccv=r33
+ ;;
+ cmpxchg8.acq r34=[r32],r34,ar.ccv
+ ;;
+ cmp.eq p6, p7 = r33, r34
+ ;;
+ (p6) addl r8 = 1, r0
+ (p7) mov r8 = r0
+ br.ret.sptk.many b0
+ .endp q_atomic_test_and_set_ptr#
diff --git a/src/corelib/arch/integrity/arch.pri b/src/corelib/arch/integrity/arch.pri
new file mode 100644
index 0000000000..2c4196ec32
--- /dev/null
+++ b/src/corelib/arch/integrity/arch.pri
@@ -0,0 +1,3 @@
+#
+# INTEGRITY RTOS architecture
+#
diff --git a/src/corelib/arch/macosx/arch.pri b/src/corelib/arch/macosx/arch.pri
new file mode 100644
index 0000000000..a2b1bf759a
--- /dev/null
+++ b/src/corelib/arch/macosx/arch.pri
@@ -0,0 +1,6 @@
+#
+# Mac OS X architecture
+#
+
+# Left blank intentionally since all the current compilers that we support can
+# handle in-line assembly.
diff --git a/src/corelib/arch/macosx/qatomic32_ppc.s b/src/corelib/arch/macosx/qatomic32_ppc.s
new file mode 100644
index 0000000000..c9a318dccd
--- /dev/null
+++ b/src/corelib/arch/macosx/qatomic32_ppc.s
@@ -0,0 +1,169 @@
+;/****************************************************************************
+;**
+;** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+;** All rights reserved.
+;** Contact: Nokia Corporation (qt-info@nokia.com)
+;**
+;** This file is part of the QtGui module of the Qt Toolkit.
+;**
+;** $QT_BEGIN_LICENSE:LGPL$
+;** No Commercial Usage
+;** This file contains pre-release code and may not be distributed.
+;** You may use this file in accordance with the terms and conditions
+;** contained in the Technology Preview License Agreement accompanying
+;** this package.
+;**
+;** GNU Lesser General Public License Usage
+;** Alternatively, this file may be used under the terms of the GNU Lesser
+;** General Public License version 2.1 as published by the Free Software
+;** Foundation and appearing in the file LICENSE.LGPL included in the
+;** packaging of this file. Please review the following information to
+;** ensure the GNU Lesser General Public License version 2.1 requirements
+;** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+;**
+;** In addition, as a special exception, Nokia gives you certain additional
+;** rights. These rights are described in the Nokia Qt LGPL Exception
+;** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+;**
+;** If you have questions regarding the use of this file, please contact
+;** Nokia at qt-info@nokia.com.
+;**
+;**
+;**
+;**
+;**
+;**
+;**
+;**
+;** $QT_END_LICENSE$
+;**
+;****************************************************************************/
+ .section __TEXT,__text,regular,pure_instructions
+ .section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
+ .section __TEXT,__text,regular,pure_instructions
+ .align 2
+ .align 2
+ .globl _q_atomic_test_and_set_int
+ .section __TEXT,__text,regular,pure_instructions
+ .align 2
+_q_atomic_test_and_set_int:
+ lwarx r6,0,r3
+ cmpw r6,r4
+ bne- $+20
+ stwcx. r5,0,r3
+ bne- $-16
+ addi r3,0,1
+ blr
+ addi r3,0,0
+ blr
+
+ .align 2
+ .globl _q_atomic_test_and_set_acquire_int
+ .section __TEXT,__text,regular,pure_instructions
+ .align 2
+_q_atomic_test_and_set_acquire_int:
+ lwarx r6,0,r3
+ cmpw r6,r4
+ bne- $+20
+ stwcx. r5,0,r3
+ bne- $-16
+ addi r3,0,1
+ b $+8
+ addi r3,0,0
+ eieio
+ blr
+
+ .align 2
+ .globl _q_atomic_test_and_set_release_int
+ .section __TEXT,__text,regular,pure_instructions
+ .align 2
+_q_atomic_test_and_set_release_int:
+ eieio
+ lwarx r6,0,r3
+ cmpw r6,r4
+ bne- $+20
+ stwcx. r5,0,r3
+ bne- $-16
+ addi r3,0,1
+ blr
+ addi r3,0,0
+ blr
+
+ .align 2
+ .globl _q_atomic_test_and_set_ptr
+ .section __TEXT,__text,regular,pure_instructions
+ .align 2
+_q_atomic_test_and_set_ptr:
+ lwarx r6,0,r3
+ cmpw r6,r4
+ bne- $+20
+ stwcx. r5,0,r3
+ bne- $-16
+ addi r3,0,1
+ blr
+ addi r3,0,0
+ blr
+
+ .align 2
+ .globl _q_atomic_increment
+ .section __TEXT,__text,regular,pure_instructions
+ .align 2
+_q_atomic_increment:
+ lwarx r4,0,r3
+ addi r4,r4,1
+ stwcx. r4,0,r3
+ bne- $-12
+ mr r3,r4
+ blr
+
+ .align 2
+ .globl _q_atomic_decrement
+ .section __TEXT,__text,regular,pure_instructions
+ .align 2
+_q_atomic_decrement:
+ lwarx r4,0,r3
+ subi r4,r4,1
+ stwcx. r4,0,r3
+ bne- $-12
+ mr r3,r4
+ blr
+
+ .align 2
+ .globl _q_atomic_set_int
+ .section __TEXT,__text,regular,pure_instructions
+ .align 2
+_q_atomic_set_int:
+ lwarx r5,0,r3
+ stwcx. r4,0,r3
+ bne- $-8
+ mr r3,r5
+ blr
+
+ .align 2
+ .globl _q_atomic_set_ptr
+ .section __TEXT,__text,regular,pure_instructions
+ .align 2
+_q_atomic_set_ptr:
+ lwarx r5,0,r3
+ stwcx. r4,0,r3
+ bne- $-8
+ mr r3,r5
+ blr
+
+.globl q_atomic_test_and_set_int.eh
+ q_atomic_test_and_set_int.eh = 0
+.globl q_atomic_test_and_set_ptr.eh
+ q_atomic_test_and_set_ptr.eh = 0
+.globl q_atomic_increment.eh
+ q_atomic_increment.eh = 0
+.globl q_atomic_decrement.eh
+ q_atomic_decrement.eh = 0
+.globl q_atomic_set_int.eh
+ q_atomic_set_int.eh = 0
+.globl q_atomic_set_ptr.eh
+ q_atomic_set_ptr.eh = 0
+.data
+.constructor
+.data
+.destructor
+.align 1
diff --git a/src/corelib/arch/mips/arch.pri b/src/corelib/arch/mips/arch.pri
new file mode 100644
index 0000000000..296c845ab3
--- /dev/null
+++ b/src/corelib/arch/mips/arch.pri
@@ -0,0 +1,8 @@
+#
+# MIPS 3/4 architecture
+#
+
+# note: even though we use inline assembler with gcc, we always
+# include the compiled version to keep binary compatibility
+*-64:SOURCES += $$QT_ARCH_CPP/qatomic_mips64.s
+else:SOURCES += $$QT_ARCH_CPP/qatomic_mips32.s
diff --git a/src/corelib/arch/mips/qatomic_mips32.s b/src/corelib/arch/mips/qatomic_mips32.s
new file mode 100644
index 0000000000..d4c0191cfa
--- /dev/null
+++ b/src/corelib/arch/mips/qatomic_mips32.s
@@ -0,0 +1,150 @@
+;/****************************************************************************
+;**
+;** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+;** All rights reserved.
+;** Contact: Nokia Corporation (qt-info@nokia.com)
+;**
+;** This file is part of the QtGui module of the Qt Toolkit.
+;**
+;** $QT_BEGIN_LICENSE:LGPL$
+;** No Commercial Usage
+;** This file contains pre-release code and may not be distributed.
+;** You may use this file in accordance with the terms and conditions
+;** contained in the Technology Preview License Agreement accompanying
+;** this package.
+;**
+;** GNU Lesser General Public License Usage
+;** Alternatively, this file may be used under the terms of the GNU Lesser
+;** General Public License version 2.1 as published by the Free Software
+;** Foundation and appearing in the file LICENSE.LGPL included in the
+;** packaging of this file. Please review the following information to
+;** ensure the GNU Lesser General Public License version 2.1 requirements
+;** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+;**
+;** In addition, as a special exception, Nokia gives you certain additional
+;** rights. These rights are described in the Nokia Qt LGPL Exception
+;** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+;**
+;** If you have questions regarding the use of this file, please contact
+;** Nokia at qt-info@nokia.com.
+;**
+;**
+;**
+;**
+;**
+;**
+;**
+;**
+;** $QT_END_LICENSE$
+;**
+;****************************************************************************/
+ .set nobopt
+ .set noreorder
+ .option pic2
+ .text
+
+ .globl q_atomic_test_and_set_int
+ .ent q_atomic_test_and_set_int
+ .set mips2
+q_atomic_test_and_set_int:
+1: ll $8,0($4)
+ bne $8,$5,2f
+ move $2,$6
+ sc $2,0($4)
+ beqz $2,1b
+ nop
+ jr $31
+ nop
+2: jr $31
+ move $2,$0
+ .set mips0
+ .end q_atomic_test_and_set_int
+
+ .globl q_atomic_test_and_set_acquire_int
+ .ent q_atomic_test_and_set_acquire_int
+ .set mips2
+q_atomic_test_and_set_acquire_int:
+1: ll $8,0($4)
+ bne $8,$5,2f
+ move $2,$6
+ sc $2,0($4)
+ beqz $2,1b
+ nop
+ jr $31
+ nop
+2: sync
+ jr $31
+ move $2,$0
+ .set mips0
+ .end q_atomic_test_and_set_acquire_int
+
+ .globl q_atomic_test_and_set_release_int
+ .ent q_atomic_test_and_set_release_int
+ .set mips2
+q_atomic_test_and_set_release_int:
+ sync
+1: ll $8,0($4)
+ bne $8,$5,2f
+ move $2,$6
+ sc $2,0($4)
+ beqz $2,1b
+ nop
+ jr $31
+ nop
+2: jr $31
+ move $2,$0
+ .set mips0
+ .end q_atomic_test_and_set_release_int
+
+ .globl q_atomic_test_and_set_ptr
+ .ent q_atomic_test_and_set_ptr
+ .set mips2
+q_atomic_test_and_set_ptr:
+1: ll $8,0($4)
+ bne $8,$5,2f
+ move $2,$6
+ sc $2,0($4)
+ beqz $2,1b
+ nop
+ jr $31
+ nop
+2: jr $31
+ move $2,$0
+ .set mips0
+ .end q_atomic_test_and_set_ptr
+
+ .globl q_atomic_test_and_set_acquire_ptr
+ .ent q_atomic_test_and_set_acquire_ptr
+ .set mips2
+q_atomic_test_and_set_acquire_ptr:
+1: ll $8,0($4)
+ bne $8,$5,2f
+ move $2,$6
+ sc $2,0($4)
+ beqz $2,1b
+ nop
+ jr $31
+ nop
+2: sync
+ jr $31
+ move $2,$0
+ .set mips0
+ .end q_atomic_test_and_set_acquire_ptr
+
+ .globl q_atomic_test_and_set_release_ptr
+ .ent q_atomic_test_and_set_release_ptr
+ .set mips2
+q_atomic_test_and_set_release_ptr:
+ sync
+1: ll $8,0($4)
+ bne $8,$5,2f
+ move $2,$6
+ sc $2,0($4)
+ beqz $2,1b
+ nop
+ jr $31
+ nop
+2: jr $31
+ move $2,$0
+ .set mips0
+ .end q_atomic_test_and_set_release_ptr
diff --git a/src/corelib/arch/mips/qatomic_mips64.s b/src/corelib/arch/mips/qatomic_mips64.s
new file mode 100644
index 0000000000..993991f0b7
--- /dev/null
+++ b/src/corelib/arch/mips/qatomic_mips64.s
@@ -0,0 +1,138 @@
+;/****************************************************************************
+;**
+;** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+;** All rights reserved.
+;** Contact: Nokia Corporation (qt-info@nokia.com)
+;**
+;** This file is part of the QtGui module of the Qt Toolkit.
+;**
+;** $QT_BEGIN_LICENSE:LGPL$
+;** No Commercial Usage
+;** This file contains pre-release code and may not be distributed.
+;** You may use this file in accordance with the terms and conditions
+;** contained in the Technology Preview License Agreement accompanying
+;** this package.
+;**
+;** GNU Lesser General Public License Usage
+;** Alternatively, this file may be used under the terms of the GNU Lesser
+;** General Public License version 2.1 as published by the Free Software
+;** Foundation and appearing in the file LICENSE.LGPL included in the
+;** packaging of this file. Please review the following information to
+;** ensure the GNU Lesser General Public License version 2.1 requirements
+;** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+;**
+;** In addition, as a special exception, Nokia gives you certain additional
+;** rights. These rights are described in the Nokia Qt LGPL Exception
+;** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+;**
+;** If you have questions regarding the use of this file, please contact
+;** Nokia at qt-info@nokia.com.
+;**
+;**
+;**
+;**
+;**
+;**
+;**
+;**
+;** $QT_END_LICENSE$
+;**
+;****************************************************************************/
+ .set nobopt
+ .set noreorder
+ .option pic2
+ .text
+
+ .globl q_atomic_test_and_set_int
+ .ent q_atomic_test_and_set_int
+q_atomic_test_and_set_int:
+1: ll $8,0($4)
+ bne $8,$5,2f
+ move $2,$6
+ sc $2,0($4)
+ beqz $2,1b
+ nop
+ jr $31
+ nop
+2: jr $31
+ move $2,$0
+ .end q_atomic_test_and_set_int
+
+ .globl q_atomic_test_and_set_acquire_int
+ .ent q_atomic_test_and_set_acquire_int
+q_atomic_test_and_set_acquire_int:
+1: ll $8,0($4)
+ bne $8,$5,2f
+ move $2,$6
+ sc $2,0($4)
+ beqz $2,1b
+ nop
+ jr $31
+ nop
+2: sync
+ jr $31
+ move $2,$0
+ .end q_atomic_test_and_set_acquire_int
+
+ .globl q_atomic_test_and_set_release_int
+ .ent q_atomic_test_and_set_release_int
+q_atomic_test_and_set_release_int:
+ sync
+1: ll $8,0($4)
+ bne $8,$5,2f
+ move $2,$6
+ sc $2,0($4)
+ beqz $2,1b
+ nop
+ jr $31
+ nop
+2: jr $31
+ move $2,$0
+ .end q_atomic_test_and_set_release_int
+
+ .globl q_atomic_test_and_set_ptr
+ .ent q_atomic_test_and_set_ptr
+q_atomic_test_and_set_ptr:
+1: lld $8,0($4)
+ bne $8,$5,2f
+ move $2,$6
+ scd $2,0($4)
+ beqz $2,1b
+ nop
+ jr $31
+ nop
+2: jr $31
+ move $2,$0
+ .end q_atomic_test_and_set_ptr
+
+ .globl q_atomic_test_and_set_acquire_ptr
+ .ent q_atomic_test_and_set_acquire_ptr
+q_atomic_test_and_set_acquire_ptr:
+1: lld $8,0($4)
+ bne $8,$5,2f
+ move $2,$6
+ scd $2,0($4)
+ beqz $2,1b
+ nop
+ jr $31
+ nop
+2: sync
+ jr $31
+ move $2,$0
+ .end q_atomic_test_and_set_acquire_ptr
+
+ .globl q_atomic_test_and_set_release_ptr
+ .ent q_atomic_test_and_set_release_ptr
+q_atomic_test_and_set_release_ptr:
+ sync
+1: lld $8,0($4)
+ bne $8,$5,2f
+ move $2,$6
+ scd $2,0($4)
+ beqz $2,1b
+ nop
+ jr $31
+ nop
+2: jr $31
+ move $2,$0
+ .end q_atomic_test_and_set_release_ptr
diff --git a/src/corelib/arch/parisc/arch.pri b/src/corelib/arch/parisc/arch.pri
new file mode 100644
index 0000000000..fab2897f04
--- /dev/null
+++ b/src/corelib/arch/parisc/arch.pri
@@ -0,0 +1,5 @@
+#
+# HP PA-RISC architecture
+#
+SOURCES += $$QT_ARCH_CPP/q_ldcw.s \
+ $$QT_ARCH_CPP/qatomic_parisc.cpp
diff --git a/src/corelib/arch/parisc/q_ldcw.s b/src/corelib/arch/parisc/q_ldcw.s
new file mode 100644
index 0000000000..f8f40467ec
--- /dev/null
+++ b/src/corelib/arch/parisc/q_ldcw.s
@@ -0,0 +1,62 @@
+;/****************************************************************************
+;**
+;** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+;** All rights reserved.
+;** Contact: Nokia Corporation (qt-info@nokia.com)
+;**
+;** This file is part of the QtGui module of the Qt Toolkit.
+;**
+;** $QT_BEGIN_LICENSE:LGPL$
+;** No Commercial Usage
+;** This file contains pre-release code and may not be distributed.
+;** You may use this file in accordance with the terms and conditions
+;** contained in the Technology Preview License Agreement accompanying
+;** this package.
+;**
+;** GNU Lesser General Public License Usage
+;** Alternatively, this file may be used under the terms of the GNU Lesser
+;** General Public License version 2.1 as published by the Free Software
+;** Foundation and appearing in the file LICENSE.LGPL included in the
+;** packaging of this file. Please review the following information to
+;** ensure the GNU Lesser General Public License version 2.1 requirements
+;** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+;**
+;** In addition, as a special exception, Nokia gives you certain additional
+;** rights. These rights are described in the Nokia Qt LGPL Exception
+;** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+;**
+;** If you have questions regarding the use of this file, please contact
+;** Nokia at qt-info@nokia.com.
+;**
+;**
+;**
+;**
+;**
+;**
+;**
+;**
+;** $QT_END_LICENSE$
+;**
+;****************************************************************************/
+ .SPACE $PRIVATE$
+ .SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31
+ .SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82
+ .SPACE $TEXT$
+ .SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44
+ .SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY
+ .IMPORT $global$,DATA
+ .IMPORT $$dyncall,MILLICODE
+ .SPACE $TEXT$
+ .SUBSPA $CODE$
+
+ .align 4
+ .EXPORT q_ldcw,ENTRY,PRIV_LEV=3,ARGW0=GR,RTNVAL=GR
+q_ldcw
+ .PROC
+ .CALLINFO FRAME=0,CALLS,SAVE_RP
+ .ENTRY
+ ldcw 0(%r26),%r1
+ bv %r0(%r2)
+ copy %r1,%r28
+ .EXIT
+ .PROCEND
diff --git a/src/corelib/arch/parisc/qatomic_parisc.cpp b/src/corelib/arch/parisc/qatomic_parisc.cpp
new file mode 100644
index 0000000000..fa446c5d7b
--- /dev/null
+++ b/src/corelib/arch/parisc/qatomic_parisc.cpp
@@ -0,0 +1,88 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include <QtCore/qglobal.h>
+#include <QtCore/qhash.h>
+
+QT_BEGIN_NAMESPACE
+
+QT_USE_NAMESPACE
+
+#define UNLOCKED {-1,-1,-1,-1}
+#define UNLOCKED2 UNLOCKED,UNLOCKED
+#define UNLOCKED4 UNLOCKED2,UNLOCKED2
+#define UNLOCKED8 UNLOCKED4,UNLOCKED4
+#define UNLOCKED16 UNLOCKED8,UNLOCKED8
+#define UNLOCKED32 UNLOCKED16,UNLOCKED16
+#define UNLOCKED64 UNLOCKED32,UNLOCKED32
+#define UNLOCKED128 UNLOCKED64,UNLOCKED64
+#define UNLOCKED256 UNLOCKED128,UNLOCKED128
+
+// use a 4k page for locks
+static int locks[256][4] = { UNLOCKED256 };
+
+int *getLock(volatile void *addr)
+{ return locks[qHash(const_cast<void *>(addr)) % 256]; }
+
+static int *align16(int *lock)
+{
+ ulong off = (((ulong) lock) % 16);
+ return off ? (int *)(ulong(lock) + 16 - off) : lock;
+}
+
+extern "C" {
+
+ int q_ldcw(volatile int *addr);
+
+ void q_atomic_lock(int *lock)
+ {
+ // ldcw requires a 16-byte aligned address
+ volatile int *x = align16(lock);
+ while (q_ldcw(x) == 0)
+ ;
+ }
+
+ void q_atomic_unlock(int *lock)
+ { lock[0] = lock[1] = lock[2] = lock[3] = -1; }
+}
+
+
+QT_END_NAMESPACE
diff --git a/src/corelib/arch/powerpc/arch.pri b/src/corelib/arch/powerpc/arch.pri
new file mode 100644
index 0000000000..1989ac73a7
--- /dev/null
+++ b/src/corelib/arch/powerpc/arch.pri
@@ -0,0 +1,10 @@
+#
+# PowerPC architecture
+#
+!*-g++* {
+ *-64 {
+ SOURCES += $$QT_ARCH_CPP/qatomic64.s
+ } else {
+ SOURCES += $$QT_ARCH_CPP/qatomic32.s
+ }
+}
diff --git a/src/corelib/arch/powerpc/qatomic32.s b/src/corelib/arch/powerpc/qatomic32.s
new file mode 100644
index 0000000000..a446359e25
--- /dev/null
+++ b/src/corelib/arch/powerpc/qatomic32.s
@@ -0,0 +1,525 @@
+############################################################################
+##
+## Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+## All rights reserved.
+## Contact: Nokia Corporation (qt-info@nokia.com)
+##
+## This file is part of the QtGui module of the Qt Toolkit.
+##
+## $QT_BEGIN_LICENSE:LGPL$
+## No Commercial Usage
+## This file contains pre-release code and may not be distributed.
+## You may use this file in accordance with the terms and conditions
+## contained in the Technology Preview License Agreement accompanying
+## this package.
+##
+## GNU Lesser General Public License Usage
+## Alternatively, this file may be used under the terms of the GNU Lesser
+## General Public License version 2.1 as published by the Free Software
+## Foundation and appearing in the file LICENSE.LGPL included in the
+## packaging of this file. Please review the following information to
+## ensure the GNU Lesser General Public License version 2.1 requirements
+## will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+##
+## In addition, as a special exception, Nokia gives you certain additional
+## rights. These rights are described in the Nokia Qt LGPL Exception
+## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+##
+## If you have questions regarding the use of this file, please contact
+## Nokia at qt-info@nokia.com.
+##
+##
+##
+##
+##
+##
+##
+##
+## $QT_END_LICENSE$
+##
+############################################################################
+ .machine "ppc"
+ .toc
+ .csect .text[PR]
+
+ .align 2
+ .globl q_atomic_test_and_set_int
+ .globl .q_atomic_test_and_set_int
+ .csect q_atomic_test_and_set_int[DS],3
+q_atomic_test_and_set_int:
+ .long .q_atomic_test_and_set_int,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_test_and_set_int:
+ lwarx 6,0,3
+ xor. 6,6,4
+ bne $+12
+ stwcx. 5,0,3
+ bne- $-16
+ subfic 3,6,0
+ adde 3,3,6
+ blr
+LT..q_atomic_test_and_set_int:
+ .long 0
+ .byte 0,9,32,64,0,0,3,0
+ .long 0
+ .long LT..q_atomic_test_and_set_int-.q_atomic_test_and_set_int
+ .short 25
+ .byte "q_atomic_test_and_set_int"
+ .align 2
+
+ .align 2
+ .globl q_atomic_test_and_set_acquire_int
+ .globl .q_atomic_test_and_set_acquire_int
+ .csect q_atomic_test_and_set_acquire_int[DS],3
+q_atomic_test_and_set_acquire_int:
+ .long .q_atomic_test_and_set_acquire_int,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_test_and_set_acquire_int:
+ lwarx 6,0,3
+ xor. 6,6,4
+ bne $+16
+ stwcx. 5,0,3
+ bne- $-16
+ isync
+ subfic 3,6,0
+ adde 3,3,6
+ blr
+LT..q_atomic_test_and_set_acquire_int:
+ .long 0
+ .byte 0,9,32,64,0,0,3,0
+ .long 0
+ .long LT..q_atomic_test_and_set_acquire_int-.q_atomic_test_and_set_acquire_int
+ .short 33
+ .byte "q_atomic_test_and_set_acquire_int"
+ .align 2
+
+ .align 2
+ .globl q_atomic_test_and_set_release_int
+ .globl .q_atomic_test_and_set_release_int
+ .csect q_atomic_test_and_set_release_int[DS],3
+q_atomic_test_and_set_release_int:
+ .long .q_atomic_test_and_set_release_int,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_test_and_set_release_int:
+ eieio
+ lwarx 6,0,3
+ xor. 6,6,4
+ bne $+12
+ stwcx. 5,0,3
+ bne- $-16
+ subfic 3,6,0
+ adde 3,3,6
+ blr
+LT..q_atomic_test_and_set_release_int:
+ .long 0
+ .byte 0,9,32,64,0,0,3,0
+ .long 0
+ .long LT..q_atomic_test_and_set_release_int-.q_atomic_test_and_set_release_int
+ .short 33
+ .byte "q_atomic_test_and_set_release_int"
+ .align 2
+
+ .align 2
+ .globl q_atomic_test_and_set_ptr
+ .globl .q_atomic_test_and_set_ptr
+ .csect q_atomic_test_and_set_ptr[DS],3
+q_atomic_test_and_set_ptr:
+ .long .q_atomic_test_and_set_ptr,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_test_and_set_ptr:
+ lwarx 6,0,3
+ xor. 6,6,4
+ bne $+12
+ stwcx. 5,0,3
+ bne- $-16
+ subfic 3,6,0
+ adde 3,3,6
+ blr
+LT..q_atomic_test_and_set_ptr:
+ .long 0
+ .byte 0,9,32,64,0,0,3,0
+ .long 0
+ .long LT..q_atomic_test_and_set_ptr-.q_atomic_test_and_set_ptr
+ .short 25
+ .byte "q_atomic_test_and_set_ptr"
+ .align 2
+
+ .align 2
+ .globl q_atomic_test_and_set_acquire_ptr
+ .globl .q_atomic_test_and_set_acquire_ptr
+ .csect q_atomic_test_and_set_acquire_ptr[DS],3
+q_atomic_test_and_set_acquire_ptr:
+ .long .q_atomic_test_and_set_acquire_ptr,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_test_and_set_acquire_ptr:
+ lwarx 6,0,3
+ xor. 6,6,4
+ bne $+16
+ stwcx. 5,0,3
+ bne- $-16
+ isync
+ subfic 3,6,0
+ adde 3,3,6
+ blr
+LT..q_atomic_test_and_set_acquire_ptr:
+ .long 0
+ .byte 0,9,32,64,0,0,3,0
+ .long 0
+ .long LT..q_atomic_test_and_set_acquire_ptr-.q_atomic_test_and_set_acquire_ptr
+ .short 25
+ .byte "q_atomic_test_and_set_acquire_ptr"
+ .align 2
+
+ .align 2
+ .globl q_atomic_test_and_set_release_ptr
+ .globl .q_atomic_test_and_set_release_ptr
+ .csect q_atomic_test_and_set_release_ptr[DS],3
+q_atomic_test_and_set_release_ptr:
+ .long .q_atomic_test_and_set_release_ptr,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_test_and_set_release_ptr:
+ eieio
+ lwarx 6,0,3
+ xor. 6,6,4
+ bne $+12
+ stwcx. 5,0,3
+ bne- $-16
+ subfic 3,6,0
+ adde 3,3,6
+ blr
+LT..q_atomic_test_and_set_release_ptr:
+ .long 0
+ .byte 0,9,32,64,0,0,3,0
+ .long 0
+ .long LT..q_atomic_test_and_set_release_ptr-.q_atomic_test_and_set_release_ptr
+ .short 33
+ .byte "q_atomic_test_and_set_release_ptr"
+ .align 2
+
+ .align 2
+ .globl q_atomic_increment
+ .globl .q_atomic_increment
+ .csect q_atomic_increment[DS],3
+q_atomic_increment:
+ .long .q_atomic_increment,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_increment:
+ lwarx 4,0,3
+ addi 4,4,1
+ stwcx. 4,0,3
+ bne- $-12
+ mr 3,4
+ blr
+LT..q_atomic_increment:
+ .long 0
+ .byte 0,9,32,64,0,0,1,0
+ .long 0
+ .long LT..q_atomic_increment-.q_atomic_increment
+ .short 18
+ .byte "q_atomic_increment"
+ .align 2
+
+ .align 2
+ .globl q_atomic_decrement
+ .globl .q_atomic_decrement
+ .csect q_atomic_decrement[DS],3
+q_atomic_decrement:
+ .long .q_atomic_decrement,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_decrement:
+ lwarx 4,0,3
+ subi 4,4,1
+ stwcx. 4,0,3
+ bne- $-12
+ mr 3,4
+ blr
+LT..q_atomic_decrement:
+ .long 0
+ .byte 0,9,32,64,0,0,1,0
+ .long 0
+ .long LT..q_atomic_decrement-.q_atomic_decrement
+ .short 18
+ .byte "q_atomic_decrement"
+ .align 2
+
+ .align 2
+ .globl q_atomic_set_int
+ .globl .q_atomic_set_int
+ .csect q_atomic_set_int[DS],3
+q_atomic_set_int:
+ .long .q_atomic_set_int,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_set_int:
+ lwarx 5,0,3
+ stwcx. 4,0,3
+ bne- $-8
+ mr 3,5
+ blr
+LT..q_atomic_set_int:
+ .long 0
+ .byte 0,9,32,64,0,0,2,0
+ .long 0
+ .long LT..q_atomic_set_int-.q_atomic_set_int
+ .short 16
+ .byte "q_atomic_set_int"
+ .align 2
+
+ .align 2
+ .globl q_atomic_fetch_and_store_acquire_int
+ .globl .q_atomic_fetch_and_store_acquire_int
+ .csect q_atomic_fetch_and_store_acquire_int[DS],3
+q_atomic_fetch_and_store_acquire_int:
+ .long .q_atomic_fetch_and_store_acquire_int,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_fetch_and_store_acquire_int:
+ lwarx 5,0,3
+ stwcx. 4,0,3
+ bne- $-8
+ isync
+ mr 3,5
+ blr
+LT..q_atomic_fetch_and_store_acquire_int:
+ .long 0
+ .byte 0,9,32,64,0,0,2,0
+ .long 0
+ .long LT..q_atomic_fetch_and_store_acquire_int-.q_atomic_fetch_and_store_acquire_int
+ .short 16
+ .byte "q_atomic_fetch_and_store_acquire_int"
+ .align 2
+
+ .align 2
+ .globl q_atomic_fetch_and_store_release_int
+ .globl .q_atomic_fetch_and_store_release_int
+ .csect q_atomic_fetch_and_store_release_int[DS],3
+q_atomic_fetch_and_store_release_int:
+ .long .q_atomic_fetch_and_store_release_int,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_fetch_and_store_release_int:
+ eieio
+ lwarx 5,0,3
+ stwcx. 4,0,3
+ bne- $-8
+ mr 3,5
+ blr
+LT..q_atomic_fetch_and_store_release_int:
+ .long 0
+ .byte 0,9,32,64,0,0,2,0
+ .long 0
+ .long LT..q_atomic_fetch_and_store_release_int-.q_atomic_fetch_and_store_release_int
+ .short 16
+ .byte "q_atomic_fetch_and_store_release_int"
+ .align 2
+
+ .align 2
+ .globl q_atomic_set_ptr
+ .globl .q_atomic_set_ptr
+ .csect q_atomic_set_ptr[DS],3
+q_atomic_set_ptr:
+ .long .q_atomic_set_ptr,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_set_ptr:
+ lwarx 5,0,3
+ stwcx. 4,0,3
+ bne- $-8
+ mr 3,5
+ blr
+LT..q_atomic_set_ptr:
+ .long 0
+ .byte 0,9,32,64,0,0,2,0
+ .long 0
+ .long LT..q_atomic_set_ptr-.q_atomic_set_ptr
+ .short 16
+ .byte "q_atomic_set_ptr"
+ .align 2
+
+ .align 2
+ .globl q_atomic_fetch_and_store_acquire_ptr
+ .globl .q_atomic_fetch_and_store_acquire_ptr
+ .csect q_atomic_fetch_and_store_acquire_ptr[DS],3
+q_atomic_fetch_and_store_acquire_ptr:
+ .long .q_atomic_fetch_and_store_acquire_ptr,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_fetch_and_store_acquire_ptr:
+ lwarx 5,0,3
+ stwcx. 4,0,3
+ bne- $-8
+ isync
+ mr 3,5
+ blr
+LT..q_atomic_fetch_and_store_acquire_ptr:
+ .long 0
+ .byte 0,9,32,64,0,0,2,0
+ .long 0
+ .long LT..q_atomic_fetch_and_store_acquire_ptr-.q_atomic_fetch_and_store_acquire_ptr
+ .short 16
+ .byte "q_atomic_fetch_and_store_acquire_ptr"
+ .align 2
+
+ .align 2
+ .globl q_atomic_fetch_and_store_release_ptr
+ .globl .q_atomic_fetch_and_store_release_ptr
+ .csect q_atomic_fetch_and_store_release_ptr[DS],3
+q_atomic_fetch_and_store_release_ptr:
+ .long .q_atomic_fetch_and_store_release_ptr,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_fetch_and_store_release_ptr:
+ eieio
+ lwarx 5,0,3
+ stwcx. 4,0,3
+ bne- $-8
+ mr 3,5
+ blr
+LT..q_atomic_fetch_and_store_release_ptr:
+ .long 0
+ .byte 0,9,32,64,0,0,2,0
+ .long 0
+ .long LT..q_atomic_fetch_and_store_release_ptr-.q_atomic_fetch_and_store_release_ptr
+ .short 16
+ .byte "q_atomic_fetch_and_store_release_ptr"
+ .align 2
+
+ .align 2
+ .globl q_atomic_fetch_and_add_int
+ .globl .q_atomic_fetch_and_add_int
+ .csect q_atomic_fetch_and_add_int[DS],3
+q_atomic_fetch_and_add_int:
+ .long .q_atomic_fetch_and_add_int,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_fetch_and_add_int:
+ lwarx 5,0,3
+ add 6,4,5
+ stwcx. 6,0,3
+ bne- $-12
+ mr 3,5
+ blr
+LT..q_atomic_fetch_and_add_int:
+ .long 0
+ .byte 0,9,32,64,0,0,1,0
+ .long 0
+ .long LT..q_atomic_fetch_and_add_int-.q_atomic_fetch_and_add_int
+ .short 18
+ .byte "q_atomic_fetch_and_add_int"
+ .align 2
+
+ .align 2
+ .globl q_atomic_fetch_and_add_acquire_int
+ .globl .q_atomic_fetch_and_add_acquire_int
+ .csect q_atomic_fetch_and_add_acquire_int[DS],3
+q_atomic_fetch_and_add_acquire_int:
+ .long .q_atomic_fetch_and_add_acquire_int,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_fetch_and_add_acquire_int:
+ lwarx 5,0,3
+ add 6,4,5
+ stwcx. 6,0,3
+ bne- $-12
+ isync
+ mr 3,5
+ blr
+LT..q_atomic_fetch_and_add_acquire_int:
+ .long 0
+ .byte 0,9,32,64,0,0,1,0
+ .long 0
+ .long LT..q_atomic_fetch_and_add_acquire_int-.q_atomic_fetch_and_add_acquire_int
+ .short 34
+ .byte "q_atomic_fetch_and_add_acquire_int"
+ .align 2
+
+ .align 2
+ .globl q_atomic_fetch_and_add_release_int
+ .globl .q_atomic_fetch_and_add_release_int
+ .csect q_atomic_fetch_and_add_release_int[DS],3
+q_atomic_fetch_and_add_release_int:
+ .long .q_atomic_fetch_and_add_release_int,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_fetch_and_add_release_int:
+ eieio
+ lwarx 5,0,3
+ add 6,4,5
+ stwcx. 6,0,3
+ bne- $-12
+ mr 3,5
+ blr
+LT..q_atomic_fetch_and_add_release_int:
+ .long 0
+ .byte 0,9,32,64,0,0,1,0
+ .long 0
+ .long LT..q_atomic_fetch_and_add_release_int-.q_atomic_fetch_and_add_release_int
+ .short 34
+ .byte "q_atomic_fetch_and_add_release_int"
+ .align 2
+
+ .align 2
+ .globl q_atomic_fetch_and_add_ptr
+ .globl .q_atomic_fetch_and_add_ptr
+ .csect q_atomic_fetch_and_add_ptr[DS],3
+q_atomic_fetch_and_add_ptr:
+ .long .q_atomic_fetch_and_add_ptr,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_fetch_and_add_ptr:
+ lwarx 5,0,3
+ add 6,4,5
+ stwcx. 6,0,3
+ bne- $-12
+ mr 3,5
+ blr
+LT..q_atomic_fetch_and_add_ptr:
+ .long 0
+ .byte 0,9,32,64,0,0,1,0
+ .long 0
+ .long LT..q_atomic_fetch_and_add_ptr-.q_atomic_fetch_and_add_ptr
+ .short 26
+ .byte "q_atomic_fetch_and_add_ptr"
+ .align 2
+
+ .align 2
+ .globl q_atomic_fetch_and_add_acquire_ptr
+ .globl .q_atomic_fetch_and_add_acquire_ptr
+ .csect q_atomic_fetch_and_add_acquire_ptr[DS],3
+q_atomic_fetch_and_add_acquire_ptr:
+ .long .q_atomic_fetch_and_add_acquire_ptr,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_fetch_and_add_acquire_ptr:
+ lwarx 5,0,3
+ add 6,4,5
+ stwcx. 6,0,3
+ bne- $-12
+ isync
+ mr 3,5
+ blr
+LT..q_atomic_fetch_and_add_acquire_ptr:
+ .long 0
+ .byte 0,9,32,64,0,0,1,0
+ .long 0
+ .long LT..q_atomic_fetch_and_add_acquire_ptr-.q_atomic_fetch_and_add_acquire_ptr
+ .short 34
+ .byte "q_atomic_fetch_and_add_acquire_ptr"
+ .align 2
+
+ .align 2
+ .globl q_atomic_fetch_and_add_release_ptr
+ .globl .q_atomic_fetch_and_add_release_ptr
+ .csect q_atomic_fetch_and_add_release_ptr[DS],3
+q_atomic_fetch_and_add_release_ptr:
+ .long .q_atomic_fetch_and_add_release_ptr,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_fetch_and_add_release_ptr:
+ eieio
+ lwarx 5,0,3
+ add 6,4,5
+ stwcx. 6,0,3
+ bne- $-12
+ mr 3,5
+ blr
+LT..q_atomic_fetch_and_add_release_ptr:
+ .long 0
+ .byte 0,9,32,64,0,0,1,0
+ .long 0
+ .long LT..q_atomic_fetch_and_add_release_ptr-.q_atomic_fetch_and_add_release_ptr
+ .short 34
+ .byte "q_atomic_fetch_and_add_release_ptr"
+ .align 2
+
+_section_.text:
+ .csect .data[RW],3
+ .long _section_.text
diff --git a/src/corelib/arch/powerpc/qatomic64.s b/src/corelib/arch/powerpc/qatomic64.s
new file mode 100644
index 0000000000..9bf31912a1
--- /dev/null
+++ b/src/corelib/arch/powerpc/qatomic64.s
@@ -0,0 +1,533 @@
+############################################################################
+##
+## Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+## All rights reserved.
+## Contact: Nokia Corporation (qt-info@nokia.com)
+##
+## This file is part of the QtCore module of the Qt Toolkit.
+##
+## $QT_BEGIN_LICENSE:LGPL$
+## No Commercial Usage
+## This file contains pre-release code and may not be distributed.
+## You may use this file in accordance with the terms and conditions
+## contained in the Technology Preview License Agreement accompanying
+## this package.
+##
+## GNU Lesser General Public License Usage
+## Alternatively, this file may be used under the terms of the GNU Lesser
+## General Public License version 2.1 as published by the Free Software
+## Foundation and appearing in the file LICENSE.LGPL included in the
+## packaging of this file. Please review the following information to
+## ensure the GNU Lesser General Public License version 2.1 requirements
+## will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+##
+## In addition, as a special exception, Nokia gives you certain additional
+## rights. These rights are described in the Nokia Qt LGPL Exception
+## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+##
+## If you have questions regarding the use of this file, please contact
+## Nokia at qt-info@nokia.com.
+##
+##
+##
+##
+##
+##
+##
+##
+## $QT_END_LICENSE$
+##
+############################################################################
+ .machine "ppc64"
+ .toc
+ .csect .text[PR]
+
+ .align 2
+ .globl q_atomic_test_and_set_int
+ .globl .q_atomic_test_and_set_int
+ .csect q_atomic_test_and_set_int[DS],3
+q_atomic_test_and_set_int:
+ .llong .q_atomic_test_and_set_int,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_test_and_set_int:
+ lwarx 6,0,3
+ xor. 6,6,4
+ bne $+12
+ stwcx. 5,0,3
+ bne- $-16
+ subfic 3,6,0
+ adde 3,3,6
+ extsw 3,3
+ blr
+LT..q_atomic_test_and_set_int:
+ .long 0
+ .byte 0,9,32,64,0,0,3,0
+ .long 0
+ .long LT..q_atomic_test_and_set_int-.q_atomic_test_and_set_int
+ .short 25
+ .byte "q_atomic_test_and_set_int"
+ .align 2
+
+ .align 2
+ .globl q_atomic_test_and_set_acquire_int
+ .globl .q_atomic_test_and_set_acquire_int
+ .csect q_atomic_test_and_set_acquire_int[DS],3
+q_atomic_test_and_set_acquire_int:
+ .llong .q_atomic_test_and_set_acquire_int,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_test_and_set_acquire_int:
+ lwarx 6,0,3
+ xor. 6,6,4
+ bne $+16
+ stwcx. 5,0,3
+ bne- $-16
+ isync
+ subfic 3,6,0
+ adde 3,3,6
+ extsw 3,3
+ blr
+LT..q_atomic_test_and_set_acquire_int:
+ .long 0
+ .byte 0,9,32,64,0,0,3,0
+ .long 0
+ .long LT..q_atomic_test_and_set_acquire_int-.q_atomic_test_and_set_acquire_int
+ .short 33
+ .byte "q_atomic_test_and_set_acquire_int"
+ .align 2
+
+ .align 2
+ .globl q_atomic_test_and_set_release_int
+ .globl .q_atomic_test_and_set_release_int
+ .csect q_atomic_test_and_set_release_int[DS],3
+q_atomic_test_and_set_release_int:
+ .llong .q_atomic_test_and_set_release_int,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_test_and_set_release_int:
+ eieio
+ lwarx 6,0,3
+ xor. 6,6,4
+ bne $+12
+ stwcx. 5,0,3
+ bne- $-16
+ subfic 3,6,0
+ adde 3,3,6
+ extsw 3,3
+ blr
+LT..q_atomic_test_and_set_release_int:
+ .long 0
+ .byte 0,9,32,64,0,0,3,0
+ .long 0
+ .long LT..q_atomic_test_and_set_release_int-.q_atomic_test_and_set_release_int
+ .short 33
+ .byte "q_atomic_test_and_set_release_int"
+ .align 2
+
+ .align 2
+ .globl q_atomic_test_and_set_ptr
+ .globl .q_atomic_test_and_set_ptr
+ .csect q_atomic_test_and_set_ptr[DS],3
+q_atomic_test_and_set_ptr:
+ .llong .q_atomic_test_and_set_ptr,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_test_and_set_ptr:
+ ldarx 6,0,3
+ xor. 6,6,4
+ bne $+12
+ stdcx. 5,0,3
+ bne- $-16
+ subfic 3,6,0
+ adde 3,3,6
+ blr
+LT..q_atomic_test_and_set_ptr:
+ .long 0
+ .byte 0,9,32,64,0,0,3,0
+ .long 0
+ .long LT..q_atomic_test_and_set_ptr-.q_atomic_test_and_set_ptr
+ .short 25
+ .byte "q_atomic_test_and_set_ptr"
+ .align 2
+
+ .align 2
+ .globl q_atomic_test_and_set_acquire_ptr
+ .globl .q_atomic_test_and_set_acquire_ptr
+ .csect q_atomic_test_and_set_acquire_ptr[DS],3
+q_atomic_test_and_set_acquire_ptr:
+ .llong .q_atomic_test_and_set_acquire_ptr,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_test_and_set_acquire_ptr:
+ ldarx 6,0,3
+ xor. 6,6,4
+ bne $+16
+ stdcx. 5,0,3
+ bne- $-16
+ isync
+ subfic 3,6,0
+ adde 3,3,6
+ blr
+LT..q_atomic_test_and_set_acquire_ptr:
+ .long 0
+ .byte 0,9,32,64,0,0,3,0
+ .long 0
+ .long LT..q_atomic_test_and_set_acquire_ptr-.q_atomic_test_and_set_acquire_ptr
+ .short 33
+ .byte "q_atomic_test_and_set_acquire_ptr"
+ .align 2
+
+ .align 2
+ .globl q_atomic_test_and_set_release_ptr
+ .globl .q_atomic_test_and_set_release_ptr
+ .csect q_atomic_test_and_set_release_ptr[DS],3
+q_atomic_test_and_set_release_ptr:
+ .llong .q_atomic_test_and_set_release_ptr,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_test_and_set_release_ptr:
+ eieio
+ ldarx 6,0,3
+ xor. 6,6,4
+ bne $+12
+ stdcx. 5,0,3
+ bne- $-16
+ subfic 3,6,0
+ adde 3,3,6
+ blr
+LT..q_atomic_test_and_set_release_ptr:
+ .long 0
+ .byte 0,9,32,64,0,0,3,0
+ .long 0
+ .long LT..q_atomic_test_and_set_release_ptr-.q_atomic_test_and_set_release_ptr
+ .short 33
+ .byte "q_atomic_test_and_set_release_ptr"
+ .align 2
+
+ .align 2
+ .globl q_atomic_increment
+ .globl .q_atomic_increment
+ .csect q_atomic_increment[DS],3
+q_atomic_increment:
+ .llong .q_atomic_increment,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_increment:
+ lwarx 4,0,3
+ addi 5,4,1
+ extsw 4,5
+ stwcx. 4,0,3
+ bne- $-16
+ mr 3,4
+ blr
+LT..q_atomic_increment:
+ .long 0
+ .byte 0,9,32,64,0,0,1,0
+ .long 0
+ .long LT..q_atomic_increment-.q_atomic_increment
+ .short 18
+ .byte "q_atomic_increment"
+ .align 2
+
+ .align 2
+ .globl q_atomic_decrement
+ .globl .q_atomic_decrement
+ .csect q_atomic_decrement[DS],3
+q_atomic_decrement:
+ .llong .q_atomic_decrement,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_decrement:
+ lwarx 4,0,3
+ subi 5,4,1
+ extsw 4,5
+ stwcx. 4,0,3
+ bne- $-16
+ mr 3,4
+ blr
+LT..q_atomic_decrement:
+ .long 0
+ .byte 0,9,32,64,0,0,1,0
+ .long 0
+ .long LT..q_atomic_decrement-.q_atomic_decrement
+ .short 18
+ .byte "q_atomic_decrement"
+ .align 2
+
+ .align 2
+ .globl q_atomic_set_int
+ .globl .q_atomic_set_int
+ .csect q_atomic_set_int[DS],3
+q_atomic_set_int:
+ .llong .q_atomic_set_int,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_set_int:
+ lwarx 5,0,3
+ stwcx. 4,0,3
+ bne- $-8
+ extsw 3,5
+ blr
+LT..q_atomic_set_int:
+ .long 0
+ .byte 0,9,32,64,0,0,2,0
+ .long 0
+ .long LT..q_atomic_set_int-.q_atomic_set_int
+ .short 16
+ .byte "q_atomic_set_int"
+ .align 2
+
+ .align 2
+ .globl q_atomic_fetch_and_store_acquire_int
+ .globl .q_atomic_fetch_and_store_acquire_int
+ .csect q_atomic_fetch_and_store_acquire_int[DS],3
+q_atomic_fetch_and_store_acquire_int:
+ .llong .q_atomic_fetch_and_store_acquire_int,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_fetch_and_store_acquire_int:
+ lwarx 5,0,3
+ stwcx. 4,0,3
+ bne- $-8
+ isync
+ extsw 3,5
+ blr
+LT..q_atomic_fetch_and_store_acquire_int:
+ .long 0
+ .byte 0,9,32,64,0,0,2,0
+ .long 0
+ .long LT..q_atomic_fetch_and_store_acquire_int-.q_atomic_fetch_and_store_acquire_int
+ .short 36
+ .byte "q_atomic_fetch_and_store_acquire_int"
+ .align 2
+
+ .align 2
+ .globl q_atomic_fetch_and_store_release_int
+ .globl .q_atomic_fetch_and_store_release_int
+ .csect q_atomic_fetch_and_store_release_int[DS],3
+q_atomic_fetch_and_store_release_int:
+ .llong .q_atomic_fetch_and_store_release_int,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_fetch_and_store_release_int:
+ eieio
+ lwarx 5,0,3
+ stwcx. 4,0,3
+ bne- $-8
+ extsw 3,5
+ blr
+LT..q_atomic_fetch_and_store_release_int:
+ .long 0
+ .byte 0,9,32,64,0,0,2,0
+ .long 0
+ .long LT..q_atomic_fetch_and_store_release_int-.q_atomic_fetch_and_store_release_int
+ .short 36
+ .byte "q_atomic_fetch_and_store_release_int"
+ .align 2
+
+ .align 2
+ .globl q_atomic_set_ptr
+ .globl .q_atomic_set_ptr
+ .csect q_atomic_set_ptr[DS],3
+q_atomic_set_ptr:
+ .llong .q_atomic_set_ptr,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_set_ptr:
+ ldarx 5,0,3
+ stdcx. 4,0,3
+ bne- $-8
+ mr 3,5
+ blr
+LT..q_atomic_set_ptr:
+ .long 0
+ .byte 0,9,32,64,0,0,2,0
+ .long 0
+ .long LT..q_atomic_set_ptr-.q_atomic_set_ptr
+ .short 16
+ .byte "q_atomic_set_ptr"
+ .align 2
+
+ .align 2
+ .globl q_atomic_fetch_and_store_acquire_ptr
+ .globl .q_atomic_fetch_and_store_acquire_ptr
+ .csect q_atomic_fetch_and_store_acquire_ptr[DS],3
+q_atomic_fetch_and_store_acquire_ptr:
+ .llong .q_atomic_fetch_and_store_acquire_ptr,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_fetch_and_store_acquire_ptr:
+ ldarx 5,0,3
+ stdcx. 4,0,3
+ bne- $-8
+ isync
+ mr 3,5
+ blr
+LT..q_atomic_fetch_and_store_acquire_ptr:
+ .long 0
+ .byte 0,9,32,64,0,0,2,0
+ .long 0
+ .long LT..q_atomic_fetch_and_store_acquire_ptr-.q_atomic_fetch_and_store_acquire_ptr
+ .short 36
+ .byte "q_atomic_fetch_and_store_acquire_ptr"
+ .align 2
+
+ .align 2
+ .globl q_atomic_fetch_and_store_release_ptr
+ .globl .q_atomic_fetch_and_store_release_ptr
+ .csect q_atomic_fetch_and_store_release_ptr[DS],3
+q_atomic_fetch_and_store_release_ptr:
+ .llong .q_atomic_fetch_and_store_release_ptr,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_fetch_and_store_release_ptr:
+ eieio
+ ldarx 5,0,3
+ stdcx. 4,0,3
+ bne- $-8
+ mr 3,5
+ blr
+LT..q_atomic_fetch_and_store_release_ptr:
+ .long 0
+ .byte 0,9,32,64,0,0,2,0
+ .long 0
+ .long LT..q_atomic_fetch_and_store_release_ptr-.q_atomic_fetch_and_store_release_ptr
+ .short 36
+ .byte "q_atomic_fetch_and_store_release_ptr"
+ .align 2
+
+ .align 2
+ .globl q_atomic_fetch_and_add_int
+ .globl .q_atomic_fetch_and_add_int
+ .csect q_atomic_fetch_and_add_int[DS],3
+q_atomic_fetch_and_add_int:
+ .llong .q_atomic_fetch_and_add_int,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_fetch_and_add_int:
+ lwarx 5,0,3
+ add 6,4,5
+ extsw 7,6
+ stwcx. 7,0,3
+ bne- $-16
+ extsw 3,5
+ blr
+LT..q_atomic_fetch_and_add_int:
+ .long 0
+ .byte 0,9,32,64,0,0,1,0
+ .long 0
+ .long LT..q_atomic_fetch_and_add_int-.q_atomic_fetch_and_add_int
+ .short 26
+ .byte "q_atomic_fetch_and_add_int"
+ .align 2
+
+ .align 2
+ .globl q_atomic_fetch_and_add_acquire_int
+ .globl .q_atomic_fetch_and_add_acquire_int
+ .csect q_atomic_fetch_and_add_acquire_int[DS],3
+q_atomic_fetch_and_add_acquire_int:
+ .llong .q_atomic_fetch_and_add_acquire_int,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_fetch_and_add_acquire_int:
+ lwarx 5,0,3
+ add 6,4,5
+ extsw 7,6
+ stwcx. 7,0,3
+ bne- $-16
+ isync
+ extsw 3,5
+ blr
+LT..q_atomic_fetch_and_add_acquire_int:
+ .long 0
+ .byte 0,9,32,64,0,0,1,0
+ .long 0
+ .long LT..q_atomic_fetch_and_add_acquire_int-.q_atomic_fetch_and_add_acquire_int
+ .short 34
+ .byte "q_atomic_fetch_and_add_acquire_int"
+ .align 2
+
+ .align 2
+ .globl q_atomic_fetch_and_add_release_int
+ .globl .q_atomic_fetch_and_add_release_int
+ .csect q_atomic_fetch_and_add_release_int[DS],3
+q_atomic_fetch_and_add_release_int:
+ .llong .q_atomic_fetch_and_add_release_int,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_fetch_and_add_release_int:
+ eieio
+ lwarx 5,0,3
+ add 6,4,5
+ extsw 7,6
+ stwcx. 7,0,3
+ bne- $-16
+ extsw 3,5
+ blr
+LT..q_atomic_fetch_and_add_release_int:
+ .long 0
+ .byte 0,9,32,64,0,0,1,0
+ .long 0
+ .long LT..q_atomic_fetch_and_add_release_int-.q_atomic_fetch_and_add_release_int
+ .short 34
+ .byte "q_atomic_fetch_and_add_release_int"
+ .align 2
+
+ .align 2
+ .globl q_atomic_fetch_and_add_ptr
+ .globl .q_atomic_fetch_and_add_ptr
+ .csect q_atomic_fetch_and_add_ptr[DS],3
+q_atomic_fetch_and_add_ptr:
+ .llong .q_atomic_fetch_and_add_ptr,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_fetch_and_add_ptr:
+ ldarx 5,0,3
+ add 6,4,5
+ stdcx. 6,0,3
+ bne- $-12
+ mr 3,5
+ blr
+LT..q_atomic_fetch_and_add_ptr:
+ .long 0
+ .byte 0,9,32,64,0,0,1,0
+ .long 0
+ .long LT..q_atomic_fetch_and_add_ptr-.q_atomic_fetch_and_add_ptr
+ .short 26
+ .byte "q_atomic_fetch_and_add_ptr"
+ .align 2
+
+ .align 2
+ .globl q_atomic_fetch_and_add_acquire_ptr
+ .globl .q_atomic_fetch_and_add_acquire_ptr
+ .csect q_atomic_fetch_and_add_acquire_ptr[DS],3
+q_atomic_fetch_and_add_acquire_ptr:
+ .llong .q_atomic_fetch_and_add_acquire_ptr,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_fetch_and_add_acquire_ptr:
+ ldarx 5,0,3
+ add 6,4,5
+ stdcx. 6,0,3
+ bne- $-12
+ isync
+ mr 3,5
+ blr
+LT..q_atomic_fetch_and_add_acquire_ptr:
+ .long 0
+ .byte 0,9,32,64,0,0,1,0
+ .long 0
+ .long LT..q_atomic_fetch_and_add_acquire_ptr-.q_atomic_fetch_and_add_acquire_ptr
+ .short 34
+ .byte "q_atomic_fetch_and_add_acquire_ptr"
+ .align 2
+
+ .align 2
+ .globl q_atomic_fetch_and_add_release_ptr
+ .globl .q_atomic_fetch_and_add_release_ptr
+ .csect q_atomic_fetch_and_add_release_ptr[DS],3
+q_atomic_fetch_and_add_release_ptr:
+ .llong .q_atomic_fetch_and_add_release_ptr,TOC[tc0],0
+ .csect .text[PR]
+.q_atomic_fetch_and_add_release_ptr:
+ eieio
+ ldarx 5,0,3
+ add 6,4,5
+ stdcx. 6,0,3
+ bne- $-12
+ mr 3,5
+ blr
+LT..q_atomic_fetch_and_add_release_ptr:
+ .long 0
+ .byte 0,9,32,64,0,0,1,0
+ .long 0
+ .long LT..q_atomic_fetch_and_add_release_ptr-.q_atomic_fetch_and_add_release_ptr
+ .short 34
+ .byte "q_atomic_fetch_and_add_release_ptr"
+ .align 2
+
+_section_.text:
+ .csect .data[RW],3
+ .llong _section_.text
diff --git a/src/corelib/arch/qatomic_alpha.h b/src/corelib/arch/qatomic_alpha.h
new file mode 100644
index 0000000000..be5d7ec884
--- /dev/null
+++ b/src/corelib/arch/qatomic_alpha.h
@@ -0,0 +1,642 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QATOMIC_ALPHA_H
+#define QATOMIC_ALPHA_H
+
+QT_BEGIN_HEADER
+
+QT_BEGIN_NAMESPACE
+
+#define Q_ATOMIC_INT_REFERENCE_COUNTING_IS_ALWAYS_NATIVE
+
+inline bool QBasicAtomicInt::isReferenceCountingNative()
+{ return true; }
+inline bool QBasicAtomicInt::isReferenceCountingWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_TEST_AND_SET_IS_ALWAYS_NATIVE
+
+inline bool QBasicAtomicInt::isTestAndSetNative()
+{ return true; }
+inline bool QBasicAtomicInt::isTestAndSetWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_FETCH_AND_STORE_IS_ALWAYS_NATIVE
+
+inline bool QBasicAtomicInt::isFetchAndStoreNative()
+{ return true; }
+inline bool QBasicAtomicInt::isFetchAndStoreWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_FETCH_AND_ADD_IS_ALWAYS_NATIVE
+
+inline bool QBasicAtomicInt::isFetchAndAddNative()
+{ return true; }
+inline bool QBasicAtomicInt::isFetchAndAddWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_TEST_AND_SET_IS_ALWAYS_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_ALWAYS_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_ADD_IS_ALWAYS_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddWaitFree()
+{ return false; }
+
+#if defined(Q_CC_GNU)
+
+inline bool QBasicAtomicInt::ref()
+{
+ register int old, tmp;
+ asm volatile("1:\n"
+ "ldl_l %0,%2\n" /* old=*ptr; */
+ "addl %0,1,%1\n" /* tmp=old+1; */
+ "stl_c %1,%2\n" /* if ((*ptr=tmp)!=tmp) tmp=0; else tmp=1; */
+ "beq %1,2f\n" /* if (tmp == 0) goto 2; */
+ "br 3f\n" /* goto 3; */
+ "2: br 1b\n" /* goto 1; */
+ "3:\n"
+ : "=&r" (old), "=&r" (tmp), "+m"(_q_value)
+ :
+ : "memory");
+ return old != -1;
+}
+
+inline bool QBasicAtomicInt::deref()
+{
+ register int old, tmp;
+ asm volatile("1:\n"
+ "ldl_l %0,%2\n" /* old=*ptr; */
+ "subl %0,1,%1\n" /* tmp=old-1; */
+ "stl_c %1,%2\n" /* if ((*ptr=tmp)!=tmp) tmp=0; else tmp=1; */
+ "beq %1,2f\n" /* if (tmp==0) goto 2; */
+ "br 3f\n" /* goto 3; */
+ "2: br 1b\n" /* goto 1; */
+ "3:\n"
+ : "=&r" (old), "=&r" (tmp), "+m"(_q_value)
+ :
+ : "memory");
+ return old != 1;
+}
+
+inline bool QBasicAtomicInt::testAndSetRelaxed(int expectedValue, int newValue)
+{
+ register int ret;
+ asm volatile("1:\n"
+ "ldl_l %0,%1\n" /* ret=*ptr; */
+ "cmpeq %0,%2,%0\n"/* if (ret==expected) ret=0; else ret=1; */
+ "beq %0,3f\n" /* if (ret==0) goto 3; */
+ "mov %3,%0\n" /* ret=newval; */
+ "stl_c %0,%1\n" /* if ((*ptr=ret)!=ret) ret=0; else ret=1; */
+ "beq %0,2f\n" /* if (ret==0) goto 2; */
+ "br 3f\n" /* goto 3; */
+ "2: br 1b\n" /* goto 1; */
+ "3:\n"
+ : "=&r" (ret), "+m" (_q_value)
+ : "r" (expectedValue), "r" (newValue)
+ : "memory");
+ return ret != 0;
+}
+
+inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
+{
+ register int ret;
+ asm volatile("1:\n"
+ "ldl_l %0,%1\n" /* ret=*ptr; */
+ "cmpeq %0,%2,%0\n"/* if (ret==expected) ret=0; else ret=1; */
+ "beq %0,3f\n" /* if (ret==0) goto 3; */
+ "mov %3,%0\n" /* ret=newval; */
+ "stl_c %0,%1\n" /* if ((*ptr=ret)!=ret) ret=0; else ret=1; */
+ "beq %0,2f\n" /* if (ret==0) goto 2; */
+ "br 3f\n" /* goto 3; */
+ "2: br 1b\n" /* goto 1; */
+ "3:\n"
+ "mb\n"
+ : "=&r" (ret), "+m" (_q_value)
+ : "r" (expectedValue), "r" (newValue)
+ : "memory");
+ return ret != 0;
+}
+
+inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
+{
+ register int ret;
+ asm volatile("mb\n"
+ "1:\n"
+ "ldl_l %0,%1\n" /* ret=*ptr; */
+ "cmpeq %0,%2,%0\n"/* if (ret==expected) ret=0; else ret=1; */
+ "beq %0,3f\n" /* if (ret==0) goto 3; */
+ "mov %3,%0\n" /* ret=newval; */
+ "stl_c %0,%1\n" /* if ((*ptr=ret)!=ret) ret=0; else ret=1; */
+ "beq %0,2f\n" /* if (ret==0) goto 2; */
+ "br 3f\n" /* goto 3; */
+ "2: br 1b\n" /* goto 1; */
+ "3:\n"
+ : "=&r" (ret), "+m" (_q_value)
+ : "r" (expectedValue), "r" (newValue)
+ : "memory");
+ return ret != 0;
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelaxed(int newValue)
+{
+ register int old, tmp;
+ asm volatile("1:\n"
+ "ldl_l %0,%2\n" /* old=*ptr; */
+ "mov %3,%1\n" /* tmp=newval; */
+ "stl_c %1,%2\n" /* if ((*ptr=tmp)!=tmp) tmp=0; else tmp=1; */
+ "beq %1,2f\n" /* if (tmp==0) goto 2; */
+ "br 3f\n" /* goto 3; */
+ "2: br 1b\n" /* goto 1; */
+ "3:\n"
+ : "=&r" (old), "=&r" (tmp), "+m" (_q_value)
+ : "r" (newValue)
+ : "memory");
+ return old;
+}
+
+inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
+{
+ register int old, tmp;
+ asm volatile("1:\n"
+ "ldl_l %0,%2\n" /* old=*ptr; */
+ "mov %3,%1\n" /* tmp=newval; */
+ "stl_c %1,%2\n" /* if ((*ptr=tmp)!=tmp) tmp=0; else tmp=1; */
+ "beq %1,2f\n" /* if (tmp==0) goto 2; */
+ "br 3f\n" /* goto 3; */
+ "2: br 1b\n" /* goto 1; */
+ "3:\n"
+ "mb\n"
+ : "=&r" (old), "=&r" (tmp), "+m" (_q_value)
+ : "r" (newValue)
+ : "memory");
+ return old;
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelease(int newValue)
+{
+ register int old, tmp;
+ asm volatile("mb\n"
+ "1:\n"
+ "ldl_l %0,%2\n" /* old=*ptr; */
+ "mov %3,%1\n" /* tmp=newval; */
+ "stl_c %1,%2\n" /* if ((*ptr=tmp)!=tmp) tmp=0; else tmp=1; */
+ "beq %1,2f\n" /* if (tmp==0) goto 2; */
+ "br 3f\n" /* goto 3; */
+ "2: br 1b\n" /* goto 1; */
+ "3:\n"
+ : "=&r" (old), "=&r" (tmp), "+m" (_q_value)
+ : "r" (newValue)
+ : "memory");
+ return old;
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelaxed(int valueToAdd)
+{
+ register int old, tmp;
+ asm volatile("1:\n"
+ "ldl_l %0,%2\n" /* old=*ptr; */
+ "addl %0,%3,%1\n"/* tmp=old+value; */
+ "stl_c %1,%2\n" /* if ((*ptr=tmp)!=tmp) tmp=0; else tmp=1; */
+ "beq %1,2f\n" /* if (tmp == 0) goto 2; */
+ "br 3f\n" /* goto 3; */
+ "2: br 1b\n" /* goto 1; */
+ "3:\n"
+ : "=&r" (old), "=&r" (tmp), "+m"(_q_value)
+ : "r" (valueToAdd)
+ : "memory");
+ return old;
+}
+
+inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
+{
+ register int old, tmp;
+ asm volatile("1:\n"
+ "ldl_l %0,%2\n" /* old=*ptr; */
+ "addl %0,%3,%1\n"/* tmp=old+value; */
+ "stl_c %1,%2\n" /* if ((*ptr=tmp)!=tmp) tmp=0; else tmp=1; */
+ "beq %1,2f\n" /* if (tmp == 0) goto 2; */
+ "br 3f\n" /* goto 3; */
+ "2: br 1b\n" /* goto 1; */
+ "3:\n"
+ "mb\n"
+ : "=&r" (old), "=&r" (tmp), "+m"(_q_value)
+ : "r" (valueToAdd)
+ : "memory");
+ return old;
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
+{
+ register int old, tmp;
+ asm volatile("mb\n"
+ "1:\n"
+ "ldl_l %0,%2\n" /* old=*ptr; */
+ "addl %0,%3,%1\n"/* tmp=old+value; */
+ "stl_c %1,%2\n" /* if ((*ptr=tmp)!=tmp) tmp=0; else tmp=1; */
+ "beq %1,2f\n" /* if (tmp == 0) goto 2; */
+ "br 3f\n" /* goto 3; */
+ "2: br 1b\n" /* goto 1; */
+ "3:\n"
+ : "=&r" (old), "=&r" (tmp), "+m"(_q_value)
+ : "r" (valueToAdd)
+ : "memory");
+ return old;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelaxed(T *expectedValue, T *newValue)
+{
+ register void *ret;
+ asm volatile("1:\n"
+ "ldq_l %0,%1\n" /* ret=*ptr; */
+ "cmpeq %0,%2,%0\n"/* if (ret==expected) tmp=0; else tmp=1; */
+ "beq %0,3f\n" /* if (tmp==0) goto 3; */
+ "mov %3,%0\n" /* tmp=newval; */
+ "stq_c %0,%1\n" /* if ((*ptr=tmp)!=tmp) tmp=0; else tmp=1; */
+ "beq %0,2f\n" /* if (ret==0) goto 2; */
+ "br 3f\n" /* goto 3; */
+ "2: br 1b\n" /* goto 1; */
+ "3:\n"
+ : "=&r" (ret), "+m" (_q_value)
+ : "r" (expectedValue), "r" (newValue)
+ : "memory");
+ return ret != 0;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetAcquire(T *expectedValue, T *newValue)
+{
+ register void *ret;
+ asm volatile("1:\n"
+ "ldq_l %0,%1\n" /* ret=*ptr; */
+ "cmpeq %0,%2,%0\n"/* if (ret==expected) tmp=0; else tmp=1; */
+ "beq %0,3f\n" /* if (tmp==0) goto 3; */
+ "mov %3,%0\n" /* tmp=newval; */
+ "stq_c %0,%1\n" /* if ((*ptr=tmp)!=tmp) tmp=0; else tmp=1; */
+ "beq %0,2f\n" /* if (ret==0) goto 2; */
+ "br 3f\n" /* goto 3; */
+ "2: br 1b\n" /* goto 1; */
+ "3:\n"
+ "mb\n"
+ : "=&r" (ret), "+m" (_q_value)
+ : "r" (expectedValue), "r" (newValue)
+ : "memory");
+ return ret != 0;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValue, T *newValue)
+{
+ register void *ret;
+ asm volatile("mb\n"
+ "1:\n"
+ "ldq_l %0,%1\n" /* ret=*ptr; */
+ "cmpeq %0,%2,%0\n"/* if (ret==expected) tmp=0; else tmp=1; */
+ "beq %0,3f\n" /* if (tmp==0) goto 3; */
+ "mov %3,%0\n" /* tmp=newval; */
+ "stq_c %0,%1\n" /* if ((*ptr=tmp)!=tmp) tmp=0; else tmp=1; */
+ "beq %0,2f\n" /* if (ret==0) goto 2; */
+ "br 3f\n" /* goto 3; */
+ "2: br 1b\n" /* goto 1; */
+ "3:\n"
+ : "=&r" (ret), "+m" (_q_value)
+ : "r" (expectedValue), "r" (newValue)
+ : "memory");
+ return ret != 0;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelaxed(T *newValue)
+{
+ register T *old, *tmp;
+ asm volatile("1:\n"
+ "ldq_l %0,%2\n" /* old=*ptr; */
+ "mov %3,%1\n" /* tmp=newval; */
+ "stq_c %1,%2\n" /* if ((*ptr=tmp)!=tmp) tmp=0; else tmp=1; */
+ "beq %1,2f\n" /* if (tmp==0) goto 2; */
+ "br 3f\n" /* goto 3; */
+ "2: br 1b\n" /* goto 1; */
+ "3:\n"
+ : "=&r" (old), "=&r" (tmp), "+m" (_q_value)
+ : "r" (newValue)
+ : "memory");
+ return old;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreAcquire(T *newValue)
+{
+ register T *old, *tmp;
+ asm volatile("1:\n"
+ "ldq_l %0,%2\n" /* old=*ptr; */
+ "mov %3,%1\n" /* tmp=newval; */
+ "stq_c %1,%2\n" /* if ((*ptr=tmp)!=tmp) tmp=0; else tmp=1; */
+ "beq %1,2f\n" /* if (tmp==0) goto 2; */
+ "br 3f\n" /* goto 3; */
+ "2: br 1b\n" /* goto 1; */
+ "3:\n"
+ "mb\n"
+ : "=&r" (old), "=&r" (tmp), "+m" (_q_value)
+ : "r" (newValue)
+ : "memory");
+ return old;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelease(T *newValue)
+{
+ register T *old, *tmp;
+ asm volatile("mb\n"
+ "1:\n"
+ "ldq_l %0,%2\n" /* old=*ptr; */
+ "mov %3,%1\n" /* tmp=newval; */
+ "stq_c %1,%2\n" /* if ((*ptr=tmp)!=tmp) tmp=0; else tmp=1; */
+ "beq %1,2f\n" /* if (tmp==0) goto 2; */
+ "br 3f\n" /* goto 3; */
+ "2: br 1b\n" /* goto 1; */
+ "3:\n"
+ : "=&r" (old), "=&r" (tmp), "+m" (_q_value)
+ : "r" (newValue)
+ : "memory");
+ return old;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelaxed(qptrdiff valueToAdd)
+{
+ register T *old, *tmp;
+ asm volatile("1:\n"
+ "ldq_l %0,%2\n" /* old=*ptr; */
+ "addq %0,%3,%1\n"/* tmp=old+value; */
+ "stq_c %1,%2\n" /* if ((*ptr=tmp)!=tmp) tmp=0; else tmp=1; */
+ "beq %1,2f\n" /* if (tmp == 0) goto 2; */
+ "br 3f\n" /* goto 3; */
+ "2: br 1b\n" /* goto 1; */
+ "3:\n"
+ : "=&r" (old), "=&r" (tmp), "+m"(_q_value)
+ : "r" (valueToAdd)
+ : "memory");
+ return reinterpret_cast<T *>(old);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddAcquire(qptrdiff valueToAdd)
+{
+ register T *old, *tmp;
+ asm volatile("1:\n"
+ "ldq_l %0,%2\n" /* old=*ptr; */
+ "addq %0,%3,%1\n"/* tmp=old+value; */
+ "stq_c %1,%2\n" /* if ((*ptr=tmp)!=tmp) tmp=0; else tmp=1; */
+ "beq %1,2f\n" /* if (tmp == 0) goto 2; */
+ "br 3f\n" /* goto 3; */
+ "2: br 1b\n" /* goto 1; */
+ "3:\n"
+ "mb\n"
+ : "=&r" (old), "=&r" (tmp), "+m"(_q_value)
+ : "r" (valueToAdd)
+ : "memory");
+ return reinterpret_cast<T *>(old);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelease(qptrdiff valueToAdd)
+{
+ register T *old, *tmp;
+ asm volatile("mb\n"
+ "1:\n"
+ "ldq_l %0,%2\n" /* old=*ptr; */
+ "addq %0,%3,%1\n"/* tmp=old+value; */
+ "stq_c %1,%2\n" /* if ((*ptr=tmp)!=tmp) tmp=0; else tmp=1; */
+ "beq %1,2f\n" /* if (tmp == 0) goto 2; */
+ "br 3f\n" /* goto 3; */
+ "2: br 1b\n" /* goto 1; */
+ "3:\n"
+ : "=&r" (old), "=&r" (tmp), "+m"(_q_value)
+ : "r" (valueToAdd)
+ : "memory");
+ return reinterpret_cast<T *>(old);
+}
+
+#else // !Q_CC_GNU
+
+extern "C" {
+ Q_CORE_EXPORT int q_atomic_test_and_set_int(volatile int *ptr, int expected, int newval);
+ Q_CORE_EXPORT int q_atomic_test_and_set_acquire_int(volatile int *ptr, int expected, int newval);
+ Q_CORE_EXPORT int q_atomic_test_and_set_release_int(volatile int *ptr, int expected, int newval);
+ Q_CORE_EXPORT int q_atomic_test_and_set_ptr(volatile void *ptr, void *expected, void *newval);
+ Q_CORE_EXPORT int q_atomic_increment(volatile int *ptr);
+ Q_CORE_EXPORT int q_atomic_decrement(volatile int *ptr);
+ Q_CORE_EXPORT int q_atomic_set_int(volatile int *ptr, int newval);
+ Q_CORE_EXPORT void *q_atomic_set_ptr(volatile void *ptr, void *newval);
+ Q_CORE_EXPORT int q_atomic_fetch_and_add_int(volatile int *ptr, int value);
+ Q_CORE_EXPORT int q_atomic_fetch_and_add_acquire_int(volatile int *ptr, int value);
+ Q_CORE_EXPORT int q_atomic_fetch_and_add_release_int(volatile int *ptr, int value);
+} // extern "C"
+
+inline bool QBasicAtomicInt::ref()
+{
+ return q_atomic_increment(&_q_value) != 0;
+}
+
+inline bool QBasicAtomicInt::deref()
+{
+ return q_atomic_decrement(&_q_value) != 0;
+}
+
+inline bool QBasicAtomicInt::testAndSetRelaxed(int expectedValue, int newValue)
+{
+ return q_atomic_test_and_set_int(&_q_value, expectedValue, newValue) != 0;
+}
+
+inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
+{
+ return q_atomic_test_and_set_acquire_int(&_q_value, expectedValue, newValue) != 0;
+}
+
+inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
+{
+ return q_atomic_test_and_set_release_int(&_q_value, expectedValue, newValue) != 0;
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelaxed(int newValue)
+{
+ return q_atomic_set_int(&_q_value, newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
+{
+ return q_atomic_fetch_and_store_acquire_int(&_q_value, newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelease(int newValue)
+{
+ return q_atomic_fetch_and_store_release_int(&_q_value, newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelaxed(int valueToAdd)
+{
+ return q_atomic_fetch_and_add_int(&_q_value, valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
+{
+ return q_atomic_fetch_and_add_acquire_int(&_q_value, valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
+{
+ return q_atomic_fetch_and_add_release_int(&_q_value, valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelaxed(T *expectedValue, T *newValue)
+{
+ return q_atomic_test_and_set_ptr(&_q_value, expectedValue, newValue) != 0;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetAcquire(T *expectedValue, T *newValue)
+{
+ return q_atomic_test_and_set_acquire_ptr(&_q_value, expectedValue, newValue) != 0;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValue, T *newValue)
+{
+ return q_atomic_test_and_set_release_ptr(&_q_value, expectedValue, newValue) != 0;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelaxed(T *newValue)
+{
+ return reinterpret_cast<T *>(q_atomic_set_ptr(&_q_value, newValue));
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreAcquire(T *newValue)
+{
+ return reinterpret_cast<T *>(q_atomic_fetch_and_store_acquire_ptr(&_q_value, newValue));
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelease(T *newValue)
+{
+ return reinterpret_cast<T *>(q_atomic_fetch_and_store_release_ptr(&_q_value, newValue));
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelaxed(qptrdiff valueToAdd)
+{
+    return reinterpret_cast<T *>(q_atomic_fetch_and_add_ptr(&_q_value, valueToAdd));
+}
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddAcquire(qptrdiff valueToAdd)
+{
+    return reinterpret_cast<T *>(q_atomic_fetch_and_add_acquire_ptr(&_q_value, valueToAdd));
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelease(qptrdiff valueToAdd)
+{
+    return reinterpret_cast<T *>(q_atomic_fetch_and_add_release_ptr(&_q_value, valueToAdd));
+}
+
+#endif // Q_CC_GNU
+
+inline bool QBasicAtomicInt::testAndSetOrdered(int expectedValue, int newValue)
+{
+ return testAndSetAcquire(expectedValue, newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreOrdered(int newValue)
+{
+ return fetchAndStoreAcquire(newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndAddOrdered(int valueToAdd)
+{
+ return fetchAndAddAcquire(valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetOrdered(T *expectedValue, T *newValue)
+{
+ return testAndSetAcquire(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreOrdered(T *newValue)
+{
+ return fetchAndStoreAcquire(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddOrdered(qptrdiff valueToAdd)
+{
+ return fetchAndAddAcquire(valueToAdd);
+}
+
+QT_END_NAMESPACE
+
+QT_END_HEADER
+
+#endif // QATOMIC_ALPHA_H
diff --git a/src/corelib/arch/qatomic_arch.h b/src/corelib/arch/qatomic_arch.h
new file mode 100644
index 0000000000..3da833a424
--- /dev/null
+++ b/src/corelib/arch/qatomic_arch.h
@@ -0,0 +1,101 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QATOMIC_ARCH_H
+#define QATOMIC_ARCH_H
+
+QT_BEGIN_HEADER
+
+#include "QtCore/qglobal.h"
+
+#if defined(QT_ARCH_INTEGRITY)
+# include "QtCore/qatomic_integrity.h"
+#elif defined(QT_ARCH_VXWORKS)
+# include "QtCore/qatomic_vxworks.h"
+#elif defined(QT_ARCH_ALPHA)
+# include "QtCore/qatomic_alpha.h"
+#elif defined(QT_ARCH_ARM)
+# include "QtCore/qatomic_arm.h"
+#elif defined(QT_ARCH_ARMV6)
+# include "QtCore/qatomic_armv6.h"
+#elif defined(QT_ARCH_AVR32)
+# include "QtCore/qatomic_avr32.h"
+#elif defined(QT_ARCH_BFIN)
+# include "QtCore/qatomic_bfin.h"
+#elif defined(QT_ARCH_GENERIC)
+# include "QtCore/qatomic_generic.h"
+#elif defined(QT_ARCH_I386)
+# include "QtCore/qatomic_i386.h"
+#elif defined(QT_ARCH_IA64)
+# include "QtCore/qatomic_ia64.h"
+#elif defined(QT_ARCH_MACOSX)
+# include "QtCore/qatomic_macosx.h"
+#elif defined(QT_ARCH_MIPS)
+# include "QtCore/qatomic_mips.h"
+#elif defined(QT_ARCH_PARISC)
+# include "QtCore/qatomic_parisc.h"
+#elif defined(QT_ARCH_POWERPC)
+# include "QtCore/qatomic_powerpc.h"
+#elif defined(QT_ARCH_S390)
+# include "QtCore/qatomic_s390.h"
+#elif defined(QT_ARCH_SPARC)
+# include "QtCore/qatomic_sparc.h"
+#elif defined(QT_ARCH_WINDOWS)
+# include "QtCore/qatomic_windows.h"
+#elif defined(QT_ARCH_WINDOWSCE)
+# include "QtCore/qatomic_windowsce.h"
+#elif defined(QT_ARCH_X86_64)
+# include "QtCore/qatomic_x86_64.h"
+#elif defined(QT_ARCH_SYMBIAN)
+# include "QtCore/qatomic_symbian.h"
+#elif defined(QT_ARCH_SH)
+# include "QtCore/qatomic_sh.h"
+#elif defined(QT_ARCH_SH4A)
+# include "QtCore/qatomic_sh4a.h"
+#elif defined(QT_ARCH_NACL)
+# include "QtCore/qatomic_generic.h"
+#else
+# error "Qt has not been ported to this architecture"
+#endif
+
+QT_END_HEADER
+
+#endif // QATOMIC_ARCH_H
diff --git a/src/corelib/arch/qatomic_arm.h b/src/corelib/arch/qatomic_arm.h
new file mode 100644
index 0000000000..07d21438d6
--- /dev/null
+++ b/src/corelib/arch/qatomic_arm.h
@@ -0,0 +1,76 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QATOMIC_ARM_H
+#define QATOMIC_ARM_H
+
+QT_BEGIN_HEADER
+
+#if defined(__ARM_ARCH_7__) \
+ || defined(__ARM_ARCH_7A__) \
+ || defined(__ARM_ARCH_7R__) \
+ || defined(__ARM_ARCH_7M__)
+# define QT_ARCH_ARMV7
+QT_BEGIN_INCLUDE_HEADER
+# include "QtCore/qatomic_armv7.h"
+QT_END_INCLUDE_HEADER
+#elif defined(__ARM_ARCH_6__) \
+ || defined(__ARM_ARCH_6J__) \
+ || defined(__ARM_ARCH_6T2__) \
+ || defined(__ARM_ARCH_6Z__) \
+ || defined(__ARM_ARCH_6K__) \
+ || defined(__ARM_ARCH_6ZK__) \
+ || defined(__ARM_ARCH_6M__) \
+ || (defined(__TARGET_ARCH_ARM) && (__TARGET_ARCH_ARM-0 >= 6))
+# define QT_ARCH_ARMV6
+QT_BEGIN_INCLUDE_HEADER
+# include "QtCore/qatomic_armv6.h"
+QT_END_INCLUDE_HEADER
+#else
+# define QT_ARCH_ARMV5
+QT_BEGIN_INCLUDE_HEADER
+# include "QtCore/qatomic_armv5.h"
+QT_END_INCLUDE_HEADER
+#endif
+
+QT_END_HEADER
+
+#endif // QATOMIC_ARM_H
diff --git a/src/corelib/arch/qatomic_armv5.h b/src/corelib/arch/qatomic_armv5.h
new file mode 100644
index 0000000000..ac8fe9670e
--- /dev/null
+++ b/src/corelib/arch/qatomic_armv5.h
@@ -0,0 +1,431 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QATOMIC_ARMV5_H
+#define QATOMIC_ARMV5_H
+
+QT_BEGIN_HEADER
+
+QT_BEGIN_NAMESPACE
+
+#define Q_ATOMIC_INT_REFERENCE_COUNTING_IS_NOT_NATIVE
+
+inline bool QBasicAtomicInt::isReferenceCountingNative()
+{ return false; }
+inline bool QBasicAtomicInt::isReferenceCountingWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_TEST_AND_SET_IS_NOT_NATIVE
+
+inline bool QBasicAtomicInt::isTestAndSetNative()
+{ return false; }
+inline bool QBasicAtomicInt::isTestAndSetWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_FETCH_AND_STORE_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT_FETCH_AND_STORE_IS_WAIT_FREE
+
+inline bool QBasicAtomicInt::isFetchAndStoreNative()
+{ return true; }
+inline bool QBasicAtomicInt::isFetchAndStoreWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_INT_FETCH_AND_ADD_IS_NOT_NATIVE
+
+inline bool QBasicAtomicInt::isFetchAndAddNative()
+{ return false; }
+inline bool QBasicAtomicInt::isFetchAndAddWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_TEST_AND_SET_IS_NOT_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetNative()
+{ return false; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_WAIT_FREE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_ADD_IS_NOT_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddNative()
+{ return false; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddWaitFree()
+{ return false; }
+
+#ifndef QT_NO_ARM_EABI
+
+// kernel places a restartable cmpxchg implementation at a fixed address
+extern "C" typedef int (qt_atomic_eabi_cmpxchg_int_t)(int oldval, int newval, volatile int *ptr);
+extern "C" typedef int (qt_atomic_eabi_cmpxchg_ptr_t)(const void *oldval, const void *newval, volatile void *ptr);
+#define qt_atomic_eabi_cmpxchg_int (*reinterpret_cast<qt_atomic_eabi_cmpxchg_int_t *>(0xffff0fc0))
+#define qt_atomic_eabi_cmpxchg_ptr (*reinterpret_cast<qt_atomic_eabi_cmpxchg_ptr_t *>(0xffff0fc0))
+
+#else
+
+extern Q_CORE_EXPORT char q_atomic_lock;
+Q_CORE_EXPORT void qt_atomic_yield(int *);
+
+#ifdef Q_CC_RVCT
+
+Q_CORE_EXPORT __asm char q_atomic_swp(volatile char *ptr, char newval);
+
+#else
+
+inline char q_atomic_swp(volatile char *ptr, char newval)
+{
+ register char ret;
+ asm volatile("swpb %0,%2,[%3]"
+ : "=&r"(ret), "=m" (*ptr)
+ : "r"(newval), "r"(ptr)
+ : "cc", "memory");
+ return ret;
+}
+
+#endif // Q_CC_RVCT
+
+#endif // QT_NO_ARM_EABI
+
+// Reference counting
+
+inline bool QBasicAtomicInt::ref()
+{
+#ifndef QT_NO_ARM_EABI
+ register int originalValue;
+ register int newValue;
+ do {
+ originalValue = _q_value;
+ newValue = originalValue + 1;
+ } while (qt_atomic_eabi_cmpxchg_int(originalValue, newValue, &_q_value) != 0);
+ return newValue != 0;
+#else
+ int count = 0;
+ while (q_atomic_swp(&q_atomic_lock, ~0) != 0)
+ qt_atomic_yield(&count);
+ int originalValue = _q_value++;
+ q_atomic_swp(&q_atomic_lock, 0);
+ return originalValue != -1;
+#endif
+}
+
+inline bool QBasicAtomicInt::deref()
+{
+#ifndef QT_NO_ARM_EABI
+ register int originalValue;
+ register int newValue;
+ do {
+ originalValue = _q_value;
+ newValue = originalValue - 1;
+ } while (qt_atomic_eabi_cmpxchg_int(originalValue, newValue, &_q_value) != 0);
+ return newValue != 0;
+#else
+ int count = 0;
+ while (q_atomic_swp(&q_atomic_lock, ~0) != 0)
+ qt_atomic_yield(&count);
+ int originalValue = _q_value--;
+ q_atomic_swp(&q_atomic_lock, 0);
+ return originalValue != 1;
+#endif
+}
+
+// Test and set for integers
+
+inline bool QBasicAtomicInt::testAndSetOrdered(int expectedValue, int newValue)
+{
+#ifndef QT_NO_ARM_EABI
+ register int originalValue;
+ do {
+ originalValue = _q_value;
+ if (originalValue != expectedValue)
+ return false;
+ } while (qt_atomic_eabi_cmpxchg_int(expectedValue, newValue, &_q_value) != 0);
+ return true;
+#else
+ bool returnValue = false;
+ int count = 0;
+ while (q_atomic_swp(&q_atomic_lock, ~0) != 0)
+ qt_atomic_yield(&count);
+ if (_q_value == expectedValue) {
+ _q_value = newValue;
+ returnValue = true;
+ }
+ q_atomic_swp(&q_atomic_lock, 0);
+ return returnValue;
+#endif
+}
+
+inline bool QBasicAtomicInt::testAndSetRelaxed(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+// Fetch and store for integers
+
+#ifndef Q_CC_RVCT
+
+inline int QBasicAtomicInt::fetchAndStoreOrdered(int newValue)
+{
+ int originalValue;
+ asm volatile("swp %0,%2,[%3]"
+ : "=&r"(originalValue), "=m" (_q_value)
+ : "r"(newValue), "r"(&_q_value)
+ : "cc", "memory");
+ return originalValue;
+}
+
+#endif
+
+inline int QBasicAtomicInt::fetchAndStoreRelaxed(int newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelease(int newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+// Fetch and add for integers
+
+inline int QBasicAtomicInt::fetchAndAddOrdered(int valueToAdd)
+{
+#ifndef QT_NO_ARM_EABI
+ register int originalValue;
+ register int newValue;
+ do {
+ originalValue = _q_value;
+ newValue = originalValue + valueToAdd;
+ } while (qt_atomic_eabi_cmpxchg_int(originalValue, newValue, &_q_value) != 0);
+ return originalValue;
+#else
+ int count = 0;
+ while (q_atomic_swp(&q_atomic_lock, ~0) != 0)
+ qt_atomic_yield(&count);
+ int originalValue = _q_value;
+ _q_value += valueToAdd;
+ q_atomic_swp(&q_atomic_lock, 0);
+ return originalValue;
+#endif
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelaxed(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+// Test and set for pointers
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetOrdered(T *expectedValue, T *newValue)
+{
+#ifndef QT_NO_ARM_EABI
+ register T *originalValue;
+ do {
+ originalValue = _q_value;
+ if (originalValue != expectedValue)
+ return false;
+ } while (qt_atomic_eabi_cmpxchg_ptr(expectedValue, newValue, &_q_value) != 0);
+ return true;
+#else
+ bool returnValue = false;
+ int count = 0;
+ while (q_atomic_swp(&q_atomic_lock, ~0) != 0)
+ qt_atomic_yield(&count);
+ if (_q_value == expectedValue) {
+ _q_value = newValue;
+ returnValue = true;
+ }
+ q_atomic_swp(&q_atomic_lock, 0);
+ return returnValue;
+#endif
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelaxed(T *expectedValue, T *newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetAcquire(T *expectedValue, T *newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValue, T *newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+// Fetch and store for pointers
+
+#ifdef Q_CC_RVCT
+
+template <typename T>
+__asm T *QBasicAtomicPointer<T>::fetchAndStoreOrdered(T *newValue)
+{
+ add r2, pc, #0
+ bx r2
+ arm
+ swp r2,r1,[r0]
+ mov r0, r2
+ bx lr
+ thumb
+}
+
+#else
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreOrdered(T *newValue)
+{
+ T *originalValue;
+ asm volatile("swp %0,%2,[%3]"
+ : "=&r"(originalValue), "=m" (_q_value)
+ : "r"(newValue), "r"(&_q_value)
+ : "cc", "memory");
+ return originalValue;
+}
+
+#endif // Q_CC_RVCT
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelaxed(T *newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreAcquire(T *newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelease(T *newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+// Fetch and add for pointers
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddOrdered(qptrdiff valueToAdd)
+{
+#ifndef QT_NO_ARM_EABI
+ register T *originalValue;
+ register T *newValue;
+ do {
+ originalValue = _q_value;
+ newValue = originalValue + valueToAdd;
+ } while (qt_atomic_eabi_cmpxchg_ptr(originalValue, newValue, &_q_value) != 0);
+ return originalValue;
+#else
+ int count = 0;
+ while (q_atomic_swp(&q_atomic_lock, ~0) != 0)
+ qt_atomic_yield(&count);
+ T *originalValue = (_q_value);
+ _q_value += valueToAdd;
+ q_atomic_swp(&q_atomic_lock, 0);
+ return originalValue;
+#endif
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelaxed(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddAcquire(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelease(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+QT_END_NAMESPACE
+
+QT_END_HEADER
+
+#endif // QATOMIC_ARMV5_H
diff --git a/src/corelib/arch/qatomic_armv6.h b/src/corelib/arch/qatomic_armv6.h
new file mode 100644
index 0000000000..3bd605853f
--- /dev/null
+++ b/src/corelib/arch/qatomic_armv6.h
@@ -0,0 +1,559 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QATOMIC_ARMV6_H
+#define QATOMIC_ARMV6_H
+
+QT_BEGIN_HEADER
+
+QT_BEGIN_NAMESPACE
+
+#define Q_ATOMIC_INT_REFERENCE_COUNTING_IS_ALWAYS_NATIVE
+
+inline bool QBasicAtomicInt::isReferenceCountingNative()
+{ return true; }
+inline bool QBasicAtomicInt::isReferenceCountingWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_TEST_AND_SET_IS_ALWAYS_NATIVE
+
+inline bool QBasicAtomicInt::isTestAndSetNative()
+{ return true; }
+inline bool QBasicAtomicInt::isTestAndSetWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_FETCH_AND_STORE_IS_ALWAYS_NATIVE
+
+inline bool QBasicAtomicInt::isFetchAndStoreNative()
+{ return true; }
+inline bool QBasicAtomicInt::isFetchAndStoreWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_FETCH_AND_ADD_IS_ALWAYS_NATIVE
+
+inline bool QBasicAtomicInt::isFetchAndAddNative()
+{ return true; }
+inline bool QBasicAtomicInt::isFetchAndAddWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_TEST_AND_SET_IS_ALWAYS_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_ALWAYS_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_ADD_IS_ALWAYS_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddWaitFree()
+{ return false; }
+
+#ifndef Q_CC_RVCT
+
+#ifndef Q_DATA_MEMORY_BARRIER
+# define Q_DATA_MEMORY_BARRIER asm volatile("":::"memory")
+#endif
+#ifndef Q_COMPILER_MEMORY_BARRIER
+# define Q_COMPILER_MEMORY_BARRIER asm volatile("":::"memory")
+#endif
+
+inline bool QBasicAtomicInt::ref()
+{
+ register int newValue;
+ register int result;
+ asm volatile("0:\n"
+ "ldrex %[newValue], [%[_q_value]]\n"
+ "add %[newValue], %[newValue], #1\n"
+ "strex %[result], %[newValue], [%[_q_value]]\n"
+ "teq %[result], #0\n"
+ "bne 0b\n"
+ : [newValue] "=&r" (newValue),
+ [result] "=&r" (result),
+ "+m" (_q_value)
+ : [_q_value] "r" (&_q_value)
+ : "cc", "memory");
+ return newValue != 0;
+}
+
+inline bool QBasicAtomicInt::deref()
+{
+ register int newValue;
+ register int result;
+ asm volatile("0:\n"
+ "ldrex %[newValue], [%[_q_value]]\n"
+ "sub %[newValue], %[newValue], #1\n"
+ "strex %[result], %[newValue], [%[_q_value]]\n"
+ "teq %[result], #0\n"
+ "bne 0b\n"
+ : [newValue] "=&r" (newValue),
+ [result] "=&r" (result),
+ "+m" (_q_value)
+ : [_q_value] "r" (&_q_value)
+ : "cc", "memory");
+ return newValue != 0;
+}
+
+inline bool QBasicAtomicInt::testAndSetRelaxed(int expectedValue, int newValue)
+{
+ register int result;
+ asm volatile("0:\n"
+ "ldrex %[result], [%[_q_value]]\n"
+ "eors %[result], %[result], %[expectedValue]\n"
+ "strexeq %[result], %[newValue], [%[_q_value]]\n"
+ "teqeq %[result], #1\n"
+ "beq 0b\n"
+ : [result] "=&r" (result),
+ "+m" (_q_value)
+ : [expectedValue] "r" (expectedValue),
+ [newValue] "r" (newValue),
+ [_q_value] "r" (&_q_value)
+ : "cc");
+ return result == 0;
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelaxed(int newValue)
+{
+ register int originalValue;
+ register int result;
+ asm volatile("0:\n"
+ "ldrex %[originalValue], [%[_q_value]]\n"
+ "strex %[result], %[newValue], [%[_q_value]]\n"
+ "teq %[result], #0\n"
+ "bne 0b\n"
+ : [originalValue] "=&r" (originalValue),
+ [result] "=&r" (result),
+ "+m" (_q_value)
+ : [newValue] "r" (newValue),
+ [_q_value] "r" (&_q_value)
+ : "cc");
+ return originalValue;
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelaxed(int valueToAdd)
+{
+ register int originalValue;
+ register int newValue;
+ register int result;
+ asm volatile("0:\n"
+ "ldrex %[originalValue], [%[_q_value]]\n"
+ "add %[newValue], %[originalValue], %[valueToAdd]\n"
+ "strex %[result], %[newValue], [%[_q_value]]\n"
+ "teq %[result], #0\n"
+ "bne 0b\n"
+ : [originalValue] "=&r" (originalValue),
+ [newValue] "=&r" (newValue),
+ [result] "=&r" (result),
+ "+m" (_q_value)
+ : [valueToAdd] "r" (valueToAdd),
+ [_q_value] "r" (&_q_value)
+ : "cc");
+ return originalValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelaxed(T *expectedValue, T *newValue)
+{
+ register T *result;
+ asm volatile("0:\n"
+ "ldrex %[result], [%[_q_value]]\n"
+ "eors %[result], %[result], %[expectedValue]\n"
+ "strexeq %[result], %[newValue], [%[_q_value]]\n"
+ "teqeq %[result], #1\n"
+ "beq 0b\n"
+ : [result] "=&r" (result),
+ "+m" (_q_value)
+ : [expectedValue] "r" (expectedValue),
+ [newValue] "r" (newValue),
+ [_q_value] "r" (&_q_value)
+ : "cc");
+ return result == 0;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelaxed(T *newValue)
+{
+ register T *originalValue;
+ register int result;
+ asm volatile("0:\n"
+ "ldrex %[originalValue], [%[_q_value]]\n"
+ "strex %[result], %[newValue], [%[_q_value]]\n"
+ "teq %[result], #0\n"
+ "bne 0b\n"
+ : [originalValue] "=&r" (originalValue),
+ [result] "=&r" (result),
+ "+m" (_q_value)
+ : [newValue] "r" (newValue),
+ [_q_value] "r" (&_q_value)
+ : "cc");
+ return originalValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelaxed(qptrdiff valueToAdd)
+{
+ register T *originalValue;
+ register T *newValue;
+ register int result;
+ asm volatile("0:\n"
+ "ldrex %[originalValue], [%[_q_value]]\n"
+ "add %[newValue], %[originalValue], %[valueToAdd]\n"
+ "strex %[result], %[newValue], [%[_q_value]]\n"
+ "teq %[result], #0\n"
+ "bne 0b\n"
+ : [originalValue] "=&r" (originalValue),
+ [newValue] "=&r" (newValue),
+ [result] "=&r" (result),
+ "+m" (_q_value)
+ : [valueToAdd] "r" (valueToAdd * sizeof(T)),
+ [_q_value] "r" (&_q_value)
+ : "cc");
+ return originalValue;
+}
+
+#else
+// This is Q_CC_RVCT
+
+// RVCT inline assembly documentation:
+// http://www.keil.com/support/man/docs/armcc/armcc_chdcffdb.htm
+// RVCT embedded assembly documentation:
+// http://www.keil.com/support/man/docs/armcc/armcc_chddbeib.htm
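+// The RVCT versions below mirror the GCC versions above, using armcc's
+// embedded assembler (__asm blocks with C labels for the retry loops) instead
+// of GCC extended inline assembly.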
+
+#if __TARGET_ARCH_THUMB-0 < 4
+// save our pragma state and switch to ARM mode (unless using Thumb2)
+# pragma push
+# pragma arm
+#endif
+
+#ifndef Q_DATA_MEMORY_BARRIER
+# define Q_DATA_MEMORY_BARRIER __schedule_barrier()
+#endif
+#ifndef Q_COMPILER_MEMORY_BARRIER
+# define Q_COMPILER_MEMORY_BARRIER __schedule_barrier()
+#endif
+
+inline bool QBasicAtomicInt::ref()
+{
+ register int newValue;
+ register int result;
+ retry:
+ __asm {
+ ldrex newValue, [&_q_value]
+ add newValue, newValue, #1
+ strex result, newValue, [&_q_value]
+ teq result, #0
+ bne retry
+ }
+ return newValue != 0;
+}
+
+inline bool QBasicAtomicInt::deref()
+{
+ register int newValue;
+ register int result;
+ retry:
+ __asm {
+ ldrex newValue, [&_q_value]
+ sub newValue, newValue, #1
+ strex result, newValue, [&_q_value]
+ teq result, #0
+ bne retry
+ }
+ return newValue != 0;
+}
+
+inline bool QBasicAtomicInt::testAndSetRelaxed(int expectedValue, int newValue)
+{
+ register int result;
+ retry:
+ __asm {
+ ldrex result, [&_q_value]
+ eors result, result, expectedValue
+ strexeq result, newValue, [&_q_value]
+ teqeq result, #1
+ beq retry
+ }
+ return result == 0;
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelaxed(int newValue)
+{
+ register int originalValue;
+ register int result;
+ retry:
+ __asm {
+ ldrex originalValue, [&_q_value]
+ strex result, newValue, [&_q_value]
+ teq result, #0
+ bne retry
+ }
+ return originalValue;
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelaxed(int valueToAdd)
+{
+ register int originalValue;
+ register int newValue;
+ register int result;
+ retry:
+ __asm {
+ ldrex originalValue, [&_q_value]
+ add newValue, originalValue, valueToAdd
+ strex result, newValue, [&_q_value]
+ teq result, #0
+ bne retry
+ }
+ return originalValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelaxed(T *expectedValue, T *newValue)
+{
+ register T *result;
+ retry:
+ __asm {
+ ldrex result, [&_q_value]
+ eors result, result, expectedValue
+ strexeq result, newValue, [&_q_value]
+ teqeq result, #1
+ beq retry
+ }
+ return result == 0;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelaxed(T *newValue)
+{
+ register T *originalValue;
+ register int result;
+ retry:
+ __asm {
+ ldrex originalValue, [&_q_value]
+ strex result, newValue, [&_q_value]
+ teq result, #0
+ bne retry
+ }
+ return originalValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelaxed(qptrdiff valueToAdd)
+{
+ register T *originalValue;
+ register T *newValue;
+ register int result;
+ retry:
+ __asm {
+ ldrex originalValue, [&_q_value]
+ add newValue, originalValue, valueToAdd * sizeof(T)
+ strex result, newValue, [&_q_value]
+ teq result, #0
+ bne retry
+ }
+ return originalValue;
+}
+
+#if __TARGET_ARCH_THUMB-0 < 4
+# pragma pop
+#endif
+
+#endif
+
+// common code
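+// The Acquire, Release and Ordered variants below are derived from the
+// Relaxed primitives: Acquire performs the operation and then issues
+// Q_DATA_MEMORY_BARRIER, Release issues the barrier first, and Ordered
+// brackets the operation with Q_DATA_MEMORY_BARRIER and
+// Q_COMPILER_MEMORY_BARRIER.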
+
+inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
+{
+ bool returnValue = testAndSetRelaxed(expectedValue, newValue);
+ Q_DATA_MEMORY_BARRIER;
+ return returnValue;
+}
+
+inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
+{
+ Q_DATA_MEMORY_BARRIER;
+ return testAndSetRelaxed(expectedValue, newValue);
+}
+
+inline bool QBasicAtomicInt::testAndSetOrdered(int expectedValue, int newValue)
+{
+ Q_DATA_MEMORY_BARRIER;
+ bool returnValue = testAndSetRelaxed(expectedValue, newValue);
+ Q_COMPILER_MEMORY_BARRIER;
+ return returnValue;
+}
+
+inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
+{
+ int returnValue = fetchAndStoreRelaxed(newValue);
+ Q_DATA_MEMORY_BARRIER;
+ return returnValue;
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelease(int newValue)
+{
+ Q_DATA_MEMORY_BARRIER;
+ return fetchAndStoreRelaxed(newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreOrdered(int newValue)
+{
+ Q_DATA_MEMORY_BARRIER;
+ int returnValue = fetchAndStoreRelaxed(newValue);
+ Q_COMPILER_MEMORY_BARRIER;
+ return returnValue;
+}
+
+
+inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
+{
+ int returnValue = fetchAndAddRelaxed(valueToAdd);
+ Q_DATA_MEMORY_BARRIER;
+ return returnValue;
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
+{
+ Q_DATA_MEMORY_BARRIER;
+ return fetchAndAddRelaxed(valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddOrdered(int valueToAdd)
+{
+ Q_DATA_MEMORY_BARRIER;
+ int returnValue = fetchAndAddRelaxed(valueToAdd);
+ Q_COMPILER_MEMORY_BARRIER;
+ return returnValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetAcquire(T *expectedValue, T *newValue)
+{
+ bool returnValue = testAndSetRelaxed(expectedValue, newValue);
+ Q_DATA_MEMORY_BARRIER;
+ return returnValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValue, T *newValue)
+{
+ Q_DATA_MEMORY_BARRIER;
+ return testAndSetRelaxed(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetOrdered(T *expectedValue, T *newValue)
+{
+ Q_DATA_MEMORY_BARRIER;
+ bool returnValue = testAndSetAcquire(expectedValue, newValue);
+ Q_COMPILER_MEMORY_BARRIER;
+ return returnValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreAcquire(T *newValue)
+{
+ T *returnValue = fetchAndStoreRelaxed(newValue);
+ Q_DATA_MEMORY_BARRIER;
+ return returnValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelease(T *newValue)
+{
+ Q_DATA_MEMORY_BARRIER;
+ return fetchAndStoreRelaxed(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreOrdered(T *newValue)
+{
+ Q_DATA_MEMORY_BARRIER;
+ T *returnValue = fetchAndStoreRelaxed(newValue);
+ Q_COMPILER_MEMORY_BARRIER;
+ return returnValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddAcquire(qptrdiff valueToAdd)
+{
+ T *returnValue = fetchAndAddRelaxed(valueToAdd);
+ Q_DATA_MEMORY_BARRIER;
+ return returnValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelease(qptrdiff valueToAdd)
+{
+ Q_DATA_MEMORY_BARRIER;
+ return fetchAndAddRelaxed(valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddOrdered(qptrdiff valueToAdd)
+{
+ Q_DATA_MEMORY_BARRIER;
+ T *returnValue = fetchAndAddRelaxed(valueToAdd);
+ Q_COMPILER_MEMORY_BARRIER;
+ return returnValue;
+}
+
+#undef Q_DATA_MEMORY_BARRIER
+#undef Q_COMPILER_MEMORY_BARRIER
+
+QT_END_NAMESPACE
+
+QT_END_HEADER
+
+#endif // QATOMIC_ARMV6_H
diff --git a/src/corelib/arch/qatomic_armv7.h b/src/corelib/arch/qatomic_armv7.h
new file mode 100644
index 0000000000..b35866b32c
--- /dev/null
+++ b/src/corelib/arch/qatomic_armv7.h
@@ -0,0 +1,61 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QATOMIC_ARMV7_H
+#define QATOMIC_ARMV7_H
+
+QT_BEGIN_HEADER
+
+// use the DMB instruction when compiling for ARMv7, ...
+#ifndef Q_CC_RVCT
+# define Q_DATA_MEMORY_BARRIER asm volatile("dmb\n":::"memory")
+#else
+# define Q_DATA_MEMORY_BARRIER do{__asm { dmb } __schedule_barrier();}while(0)
+#endif
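+// DMB is a data memory barrier: it orders all memory accesses issued before
+// it against those issued after it, giving the Acquire/Release/Ordered
+// variants in qatomic_armv6.h real hardware ordering on ARMv7.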
+
+// ... but the implementation is otherwise identical to that for ARMv6
+QT_BEGIN_INCLUDE_HEADER
+#include "QtCore/qatomic_armv6.h"
+QT_END_INCLUDE_HEADER
+
+QT_END_HEADER
+
+#endif // QATOMIC_ARMV7_H
diff --git a/src/corelib/arch/qatomic_avr32.h b/src/corelib/arch/qatomic_avr32.h
new file mode 100644
index 0000000000..a650d73332
--- /dev/null
+++ b/src/corelib/arch/qatomic_avr32.h
@@ -0,0 +1,252 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QATOMIC_AVR32_H
+#define QATOMIC_AVR32_H
+
+QT_BEGIN_HEADER
+
+QT_BEGIN_NAMESPACE
+
+#define Q_ATOMIC_INT_REFERENCE_COUNTING_IS_ALWAYS_NATIVE
+
+inline bool QBasicAtomicInt::isReferenceCountingNative()
+{ return true; }
+inline bool QBasicAtomicInt::isReferenceCountingWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_TEST_AND_SET_IS_ALWAYS_NATIVE
+
+inline bool QBasicAtomicInt::isTestAndSetNative()
+{ return true; }
+inline bool QBasicAtomicInt::isTestAndSetWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_FETCH_AND_STORE_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT_FETCH_AND_STORE_IS_WAIT_FREE
+
+inline bool QBasicAtomicInt::isFetchAndStoreNative()
+{ return true; }
+inline bool QBasicAtomicInt::isFetchAndStoreWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_INT_FETCH_AND_ADD_IS_ALWAYS_NATIVE
+
+inline bool QBasicAtomicInt::isFetchAndAddNative()
+{ return true; }
+inline bool QBasicAtomicInt::isFetchAndAddWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_TEST_AND_SET_IS_ALWAYS_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_WAIT_FREE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_ADD_IS_ALWAYS_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddWaitFree()
+{ return false; }
+
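+// The AVR32 port is built on the GCC __sync_* intrinsics; since those already
+// provide the required ordering, the Relaxed, Acquire and Release variants
+// below simply forward to the Ordered ones.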
+inline bool QBasicAtomicInt::ref()
+{
+ return __sync_add_and_fetch(&_q_value, 1);
+}
+
+inline bool QBasicAtomicInt::deref()
+{
+ return __sync_sub_and_fetch(&_q_value, 1);
+}
+
+inline bool QBasicAtomicInt::testAndSetOrdered(int expectedValue, int newValue)
+{
+ return __sync_bool_compare_and_swap(&_q_value, expectedValue, newValue);
+}
+
+inline bool QBasicAtomicInt::testAndSetRelaxed(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreOrdered(int newValue)
+{
+ return __sync_lock_test_and_set(&_q_value, newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelaxed(int newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelease(int newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndAddOrdered(int valueToAdd)
+{
+ return __sync_fetch_and_add(&_q_value, valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelaxed(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetOrdered(T *expectedValue, T *newValue)
+{
+ return __sync_bool_compare_and_swap(&_q_value, expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelaxed(T *expectedValue, T *newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetAcquire(T *expectedValue, T *newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValue, T *newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreOrdered(T *newValue)
+{
+ return __sync_lock_test_and_set(&_q_value, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelaxed(T *newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreAcquire(T *newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelease(T *newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddOrdered(qptrdiff valueToAdd)
+{
+ return __sync_fetch_and_add(&_q_value, valueToAdd * sizeof(T));
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelaxed(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddAcquire(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelease(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+QT_END_NAMESPACE
+
+QT_END_HEADER
+
+#endif // QATOMIC_AVR32_H
diff --git a/src/corelib/arch/qatomic_bfin.h b/src/corelib/arch/qatomic_bfin.h
new file mode 100644
index 0000000000..dd53342dfa
--- /dev/null
+++ b/src/corelib/arch/qatomic_bfin.h
@@ -0,0 +1,343 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QATOMIC_BFIN_H
+#define QATOMIC_BFIN_H
+
+QT_BEGIN_HEADER
+
+QT_BEGIN_NAMESPACE
+
+#define Q_ATOMIC_INT_REFERENCE_COUNTING_IS_NOT_NATIVE
+
+inline bool QBasicAtomicInt::isReferenceCountingNative()
+{ return false; }
+inline bool QBasicAtomicInt::isReferenceCountingWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_TEST_AND_SET_IS_NOT_NATIVE
+
+inline bool QBasicAtomicInt::isTestAndSetNative()
+{ return false; }
+inline bool QBasicAtomicInt::isTestAndSetWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_FETCH_AND_STORE_IS_NOT_NATIVE
+
+inline bool QBasicAtomicInt::isFetchAndStoreNative()
+{ return false; }
+inline bool QBasicAtomicInt::isFetchAndStoreWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_FETCH_AND_ADD_IS_NOT_NATIVE
+
+inline bool QBasicAtomicInt::isFetchAndAddNative()
+{ return false; }
+inline bool QBasicAtomicInt::isFetchAndAddWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_TEST_AND_SET_IS_NOT_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetNative()
+{ return false; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_NOT_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreNative()
+{ return false; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_ADD_IS_NOT_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddNative()
+{ return false; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddWaitFree()
+{ return false; }
+
+#if defined(Q_OS_LINUX) && defined(Q_CC_GNU)
+
+QT_BEGIN_INCLUDE_NAMESPACE
+#include <asm/fixed_code.h>
+QT_END_INCLUDE_NAMESPACE
+
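+// On Blackfin/Linux the kernel provides atomic helper sequences at fixed
+// addresses ("fixed code"); asm/fixed_code.h exposes their entry points as
+// ATOMIC_ADD32, ATOMIC_SUB32, ATOMIC_CAS32 and ATOMIC_XCHG32, and the inline
+// assembly below branches to them with CALL, passing the operands in
+// R0-R2 and the target address in P0.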
+inline bool QBasicAtomicInt::ref()
+{
+ int ret;
+ asm volatile("R0 = 1;\n\t"
+ "P0 = %3;\n\t"
+ "CALL (%2);\n\t"
+ "%0 = R0;"
+ : "=da" (ret), "=m" (_q_value)
+ : "a" (ATOMIC_ADD32), "da" (&_q_value), "m" (_q_value)
+ : "R0", "R1", "P0", "RETS", "memory");
+ return ret != 0;
+}
+
+inline bool QBasicAtomicInt::deref()
+{
+ int ret;
+ asm volatile("R0 = 1;\n\t"
+ "P0 = %3;\n\t"
+ "CALL (%2);\n\t"
+ "%0 = R0;"
+ : "=da" (ret), "=m" (_q_value)
+ : "a" (ATOMIC_SUB32), "da" (&_q_value), "m" (_q_value)
+ : "R0", "R1", "P0", "RETS", "memory");
+ return ret != 0;
+}
+
+inline bool QBasicAtomicInt::testAndSetOrdered(int expectedValue, int newValue)
+{
+ long int readval;
+ asm volatile ("P0 = %2;\n\t"
+ "R1 = %3;\n\t"
+ "R2 = %4;\n\t"
+ "CALL (%5);\n\t"
+ "%0 = R0;\n\t"
+ : "=da" (readval), "=m" (_q_value)
+ : "da" (&_q_value),
+ "da" (expectedValue),
+ "da" (newValue),
+ "a" (ATOMIC_CAS32),
+ "m" (_q_value)
+ : "P0", "R0", "R1", "R2", "RETS", "memory", "cc");
+ return readval == expectedValue;
+}
+
+inline int QBasicAtomicInt::fetchAndStoreOrdered(int newValue)
+{
+ asm volatile("R1 = %2;\n\t"
+ "P0 = %4;\n\t"
+ "CALL (%3);\n\t"
+ "%0 = R0;"
+ : "=da" (newValue), "=m" (_q_value)
+ : "da" (newValue), "a" (ATOMIC_XCHG32), "da" (&_q_value), "m" (_q_value)
+ : "R0", "R1", "P0", "RETS", "memory");
+ return newValue;
+}
+
+inline int QBasicAtomicInt::fetchAndAddOrdered(int valueToAdd)
+{
+ int ret;
+ asm volatile("R0 = %[val];\n\t"
+ "P0 = %[qvalp];\n\t"
+ "CALL (%[addr]);\n\t"
+ "%[ret] = R1;"
+ : [ret] "=da" (ret), "=m" (_q_value)
+ : [addr] "a" (ATOMIC_ADD32), [qvalp] "da" (&_q_value), "m" (_q_value), [val] "da" (valueToAdd)
+ : "R0", "R1", "P0", "RETS", "memory");
+ return ret;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetOrdered(T *expectedValue, T *newValue)
+{
+ T *readval;
+ asm volatile ("P0 = %2;\n\t"
+ "R1 = %3;\n\t"
+ "R2 = %4;\n\t"
+ "CALL (%5);\n\t"
+ "%0 = R0;\n\t"
+ : "=da" (readval), "=m" (_q_value)
+ : "da" (&_q_value),
+ "da" (expectedValue),
+ "da" (newValue),
+ "a" (ATOMIC_CAS32),
+ "m" (_q_value)
+ : "P0", "R0", "R1", "R2", "RETS", "memory", "cc");
+ return readval == expectedValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreOrdered(T *newValue)
+{
+ asm volatile("R1 = %2;\n\t"
+ "P0 = %4;\n\t"
+ "CALL (%3);\n\t"
+ "%0 = R0;"
+ : "=da" (newValue), "=m" (_q_value)
+ : "da" (newValue), "a" (ATOMIC_XCHG32), "da" (&_q_value), "m" (_q_value)
+ : "R0", "R1", "P0", "RETS", "memory");
+ return newValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddOrdered(qptrdiff valueToAdd)
+{
+ T* ret;
+ asm volatile("R0 = %[val];\n\t"
+ "P0 = %[qvalp];\n\t"
+ "CALL (%[addr]);\n\t"
+ "%[ret] = R1;"
+ : [ret] "=da" (ret), "=m" (_q_value)
+ : [addr] "a" (ATOMIC_ADD32), [qvalp] "da" (&_q_value), "m" (_q_value), [val] "da" (valueToAdd * sizeof(T))
+ : "R0", "R1", "P0", "RETS", "memory");
+ return ret;
+}
+
+
+#endif // Q_OS_LINUX && Q_CC_GNU
+
+// Test and set for integers
+
+inline bool QBasicAtomicInt::testAndSetRelaxed(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+// Fetch and store for integers
+
+inline int QBasicAtomicInt::fetchAndStoreRelaxed(int newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelease(int newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+// Fetch and add for integers
+
+inline int QBasicAtomicInt::fetchAndAddRelaxed(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+// Test and set for pointers
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelaxed(T *expectedValue, T *newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetAcquire(T *expectedValue, T *newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValue, T *newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+// Fetch and store for pointers
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelaxed(T *newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreAcquire(T *newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelease(T *newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+// Fetch and add for pointers
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelaxed(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddAcquire(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelease(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+QT_END_NAMESPACE
+
+QT_END_HEADER
+
+#endif // QATOMIC_BFIN_H
diff --git a/src/corelib/arch/qatomic_bootstrap.h b/src/corelib/arch/qatomic_bootstrap.h
new file mode 100644
index 0000000000..7927d8b557
--- /dev/null
+++ b/src/corelib/arch/qatomic_bootstrap.h
@@ -0,0 +1,99 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QATOMIC_BOOTSTRAP_H
+#define QATOMIC_BOOTSTRAP_H
+
+QT_BEGIN_HEADER
+
+QT_BEGIN_NAMESPACE
+
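+// The bootstrap variant is only used when building the bootstrapped,
+// single-threaded tools, so plain non-atomic operations are sufficient here.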
+inline bool QBasicAtomicInt::ref()
+{
+ return ++_q_value != 0;
+}
+
+inline bool QBasicAtomicInt::deref()
+{
+ return --_q_value != 0;
+}
+
+inline bool QBasicAtomicInt::testAndSetOrdered(int expectedValue, int newValue)
+{
+ if (_q_value == expectedValue) {
+ _q_value = newValue;
+ return true;
+ }
+ return false;
+}
+
+inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelaxed(int valueToAdd)
+{
+ int returnValue = _q_value;
+ _q_value += valueToAdd;
+ return returnValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetOrdered(T *expectedValue, T *newValue)
+{
+ if (_q_value == expectedValue) {
+ _q_value = newValue;
+ return true;
+ }
+ return false;
+}
+
+QT_END_NAMESPACE
+
+QT_END_HEADER
+
+#endif // QATOMIC_BOOTSTRAP_H
diff --git a/src/corelib/arch/qatomic_generic.h b/src/corelib/arch/qatomic_generic.h
new file mode 100644
index 0000000000..d9c864798e
--- /dev/null
+++ b/src/corelib/arch/qatomic_generic.h
@@ -0,0 +1,282 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QATOMIC_GENERIC_H
+#define QATOMIC_GENERIC_H
+
+QT_BEGIN_HEADER
+
+QT_BEGIN_NAMESPACE
+
+#define Q_ATOMIC_INT_REFERENCE_COUNTING_IS_NOT_NATIVE
+
+inline bool QBasicAtomicInt::isReferenceCountingNative()
+{ return false; }
+inline bool QBasicAtomicInt::isReferenceCountingWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_TEST_AND_SET_IS_NOT_NATIVE
+
+inline bool QBasicAtomicInt::isTestAndSetNative()
+{ return false; }
+inline bool QBasicAtomicInt::isTestAndSetWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_FETCH_AND_STORE_IS_NOT_NATIVE
+
+inline bool QBasicAtomicInt::isFetchAndStoreNative()
+{ return false; }
+inline bool QBasicAtomicInt::isFetchAndStoreWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_FETCH_AND_ADD_IS_NOT_NATIVE
+
+inline bool QBasicAtomicInt::isFetchAndAddNative()
+{ return false; }
+inline bool QBasicAtomicInt::isFetchAndAddWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_TEST_AND_SET_IS_NOT_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetNative()
+{ return false; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_NOT_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreNative()
+{ return false; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_ADD_IS_NOT_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddNative()
+{ return false; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddWaitFree()
+{ return false; }
+
+Q_CORE_EXPORT bool QBasicAtomicInt_testAndSetOrdered(volatile int *, int, int);
+Q_CORE_EXPORT int QBasicAtomicInt_fetchAndStoreOrdered(volatile int *, int);
+Q_CORE_EXPORT int QBasicAtomicInt_fetchAndAddOrdered(volatile int *, int);
+
+Q_CORE_EXPORT bool QBasicAtomicPointer_testAndSetOrdered(void * volatile *, void *, void *);
+Q_CORE_EXPORT void *QBasicAtomicPointer_fetchAndStoreOrdered(void * volatile *, void *);
+Q_CORE_EXPORT void *QBasicAtomicPointer_fetchAndAddOrdered(void * volatile *, qptrdiff);
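+// The generic implementation forwards every operation to the out-of-line
+// helpers declared above; the unions used in the pointer variants below only
+// convert a T * volatile * into the void * volatile * those helpers expect.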
+
+// Reference counting
+
+inline bool QBasicAtomicInt::ref()
+{
+ return QBasicAtomicInt_fetchAndAddOrdered(&_q_value, 1) != -1;
+}
+
+inline bool QBasicAtomicInt::deref()
+{
+ return QBasicAtomicInt_fetchAndAddOrdered(&_q_value, -1) != 1;
+}
+
+// Test and set for integers
+
+inline bool QBasicAtomicInt::testAndSetOrdered(int expectedValue, int newValue)
+{
+ return QBasicAtomicInt_testAndSetOrdered(&_q_value, expectedValue, newValue);
+}
+
+inline bool QBasicAtomicInt::testAndSetRelaxed(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+// Fetch and store for integers
+
+inline int QBasicAtomicInt::fetchAndStoreOrdered(int newValue)
+{
+ return QBasicAtomicInt_fetchAndStoreOrdered(&_q_value, newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelaxed(int newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelease(int newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+// Fetch and add for integers
+
+inline int QBasicAtomicInt::fetchAndAddOrdered(int valueToAdd)
+{
+ return QBasicAtomicInt_fetchAndAddOrdered(&_q_value, valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelaxed(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+// Test and set for pointers
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetOrdered(T *expectedValue, T *newValue)
+{
+ union { T * volatile * typed; void * volatile * voidp; } pointer;
+ pointer.typed = &_q_value;
+ return QBasicAtomicPointer_testAndSetOrdered(pointer.voidp, expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelaxed(T *expectedValue, T *newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetAcquire(T *expectedValue, T *newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValue, T *newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+// Fetch and store for pointers
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreOrdered(T *newValue)
+{
+ union { T * volatile * typed; void * volatile * voidp; } pointer;
+ union { T *typed; void *voidp; } returnValue;
+ pointer.typed = &_q_value;
+ returnValue.voidp = QBasicAtomicPointer_fetchAndStoreOrdered(pointer.voidp, newValue);
+ return returnValue.typed;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelaxed(T *newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreAcquire(T *newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelease(T *newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+// Fetch and add for pointers
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddOrdered(qptrdiff valueToAdd)
+{
+ union { T * volatile *typed; void * volatile *voidp; } pointer;
+ union { T *typed; void *voidp; } returnValue;
+ pointer.typed = &_q_value;
+ returnValue.voidp = QBasicAtomicPointer_fetchAndAddOrdered(pointer.voidp, valueToAdd * sizeof(T));
+ return returnValue.typed;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelaxed(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddAcquire(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelease(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+QT_END_NAMESPACE
+
+QT_END_HEADER
+
+#endif // QATOMIC_GENERIC_H
diff --git a/src/corelib/arch/qatomic_i386.h b/src/corelib/arch/qatomic_i386.h
new file mode 100644
index 0000000000..73095a94f6
--- /dev/null
+++ b/src/corelib/arch/qatomic_i386.h
@@ -0,0 +1,361 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QATOMIC_I386_H
+#define QATOMIC_I386_H
+
+QT_BEGIN_HEADER
+QT_BEGIN_NAMESPACE
+
+#define Q_ATOMIC_INT_REFERENCE_COUNTING_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT_REFERENCE_COUNTING_IS_WAIT_FREE
+
+inline bool QBasicAtomicInt::isReferenceCountingNative()
+{ return true; }
+inline bool QBasicAtomicInt::isReferenceCountingWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_INT_TEST_AND_SET_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT_TEST_AND_SET_IS_WAIT_FREE
+
+inline bool QBasicAtomicInt::isTestAndSetNative()
+{ return true; }
+inline bool QBasicAtomicInt::isTestAndSetWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_INT_FETCH_AND_STORE_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT_FETCH_AND_STORE_IS_WAIT_FREE
+
+inline bool QBasicAtomicInt::isFetchAndStoreNative()
+{ return true; }
+inline bool QBasicAtomicInt::isFetchAndStoreWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_INT_FETCH_AND_ADD_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT_FETCH_AND_ADD_IS_WAIT_FREE
+
+inline bool QBasicAtomicInt::isFetchAndAddNative()
+{ return true; }
+inline bool QBasicAtomicInt::isFetchAndAddWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_POINTER_TEST_AND_SET_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_POINTER_TEST_AND_SET_IS_WAIT_FREE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_WAIT_FREE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_ADD_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_POINTER_FETCH_AND_ADD_IS_WAIT_FREE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddWaitFree()
+{ return true; }
+
+#if defined(Q_CC_GNU) || defined(Q_CC_INTEL)
+
+inline bool QBasicAtomicInt::ref()
+{
+ unsigned char ret;
+ asm volatile("lock\n"
+ "incl %0\n"
+ "setne %1"
+ : "=m" (_q_value), "=qm" (ret)
+ : "m" (_q_value)
+ : "memory");
+ return ret != 0;
+}
+
+inline bool QBasicAtomicInt::deref()
+{
+ unsigned char ret;
+ asm volatile("lock\n"
+ "decl %0\n"
+ "setne %1"
+ : "=m" (_q_value), "=qm" (ret)
+ : "m" (_q_value)
+ : "memory");
+ return ret != 0;
+}
+
+inline bool QBasicAtomicInt::testAndSetOrdered(int expectedValue, int newValue)
+{
+ unsigned char ret;
+ asm volatile("lock\n"
+ "cmpxchgl %3,%2\n"
+ "sete %1\n"
+ : "=a" (newValue), "=qm" (ret), "+m" (_q_value)
+ : "r" (newValue), "0" (expectedValue)
+ : "memory");
+ return ret != 0;
+}
+
+inline int QBasicAtomicInt::fetchAndStoreOrdered(int newValue)
+{
+ asm volatile("xchgl %0,%1"
+ : "=r" (newValue), "+m" (_q_value)
+ : "0" (newValue)
+ : "memory");
+ return newValue;
+}
+
+inline int QBasicAtomicInt::fetchAndAddOrdered(int valueToAdd)
+{
+ asm volatile("lock\n"
+ "xaddl %0,%1"
+ : "=r" (valueToAdd), "+m" (_q_value)
+ : "0" (valueToAdd)
+ : "memory");
+ return valueToAdd;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetOrdered(T *expectedValue, T *newValue)
+{
+ unsigned char ret;
+ asm volatile("lock\n"
+ "cmpxchgl %3,%2\n"
+ "sete %1\n"
+ : "=a" (newValue), "=qm" (ret), "+m" (_q_value)
+ : "r" (newValue), "0" (expectedValue)
+ : "memory");
+ return ret != 0;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreOrdered(T *newValue)
+{
+ asm volatile("xchgl %0,%1"
+ : "=r" (newValue), "+m" (_q_value)
+ : "0" (newValue)
+ : "memory");
+ return newValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddOrdered(qptrdiff valueToAdd)
+{
+ asm volatile("lock\n"
+ "xaddl %0,%1"
+ : "=r" (valueToAdd), "+m" (_q_value)
+ : "0" (valueToAdd * sizeof(T))
+ : "memory");
+ return reinterpret_cast<T *>(valueToAdd);
+}
+
+#else
+
+extern "C" {
+ Q_CORE_EXPORT int q_atomic_test_and_set_int(volatile int *ptr, int expected, int newval);
+ Q_CORE_EXPORT int q_atomic_test_and_set_ptr(volatile void *ptr, void *expected, void *newval);
+ Q_CORE_EXPORT int q_atomic_increment(volatile int *ptr);
+ Q_CORE_EXPORT int q_atomic_decrement(volatile int *ptr);
+ Q_CORE_EXPORT int q_atomic_set_int(volatile int *ptr, int newval);
+ Q_CORE_EXPORT void *q_atomic_set_ptr(volatile void *ptr, void *newval);
+ Q_CORE_EXPORT int q_atomic_fetch_and_add_int(volatile int *ptr, int value);
+ Q_CORE_EXPORT void *q_atomic_fetch_and_add_ptr(volatile void *ptr, int value);
+} // extern "C"
+
+inline bool QBasicAtomicInt::ref()
+{
+ return q_atomic_increment(&_q_value) != 0;
+}
+
+inline bool QBasicAtomicInt::deref()
+{
+ return q_atomic_decrement(&_q_value) != 0;
+}
+
+inline bool QBasicAtomicInt::testAndSetOrdered(int expectedValue, int newValue)
+{
+ return q_atomic_test_and_set_int(&_q_value, expectedValue, newValue) != 0;
+}
+
+inline int QBasicAtomicInt::fetchAndStoreOrdered(int newValue)
+{
+ return q_atomic_set_int(&_q_value, newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndAddOrdered(int valueToAdd)
+{
+ return q_atomic_fetch_and_add_int(&_q_value, valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetOrdered(T *expectedValue, T *newValue)
+{
+ return q_atomic_test_and_set_ptr(&_q_value, expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreOrdered(T *newValue)
+{
+ return reinterpret_cast<T *>(q_atomic_set_ptr(&_q_value, newValue));
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddOrdered(qptrdiff valueToAdd)
+{
+ return reinterpret_cast<T *>(q_atomic_fetch_and_add_ptr(&_q_value, valueToAdd * sizeof(T)));
+}
+
+#endif
+
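+// On x86, the locked read-modify-write instructions that implement the
+// operations above are fully ordered, so the Relaxed, Acquire and Release
+// variants can all map directly onto the Ordered implementations.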
+inline bool QBasicAtomicInt::testAndSetRelaxed(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelaxed(int newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelease(int newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelaxed(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelaxed(T *expectedValue, T *newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetAcquire(T *expectedValue, T *newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValue, T *newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelaxed(T *newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreAcquire(T *newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelease(T *newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelaxed(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddAcquire(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelease(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+QT_END_NAMESPACE
+QT_END_HEADER
+
+#endif // QATOMIC_I386_H
diff --git a/src/corelib/arch/qatomic_ia64.h b/src/corelib/arch/qatomic_ia64.h
new file mode 100644
index 0000000000..42f30266d2
--- /dev/null
+++ b/src/corelib/arch/qatomic_ia64.h
@@ -0,0 +1,813 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QATOMIC_IA64_H
+#define QATOMIC_IA64_H
+
+QT_BEGIN_HEADER
+
+QT_BEGIN_NAMESPACE
+
+#define Q_ATOMIC_INT_REFERENCE_COUNTING_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT_REFERENCE_COUNTING_IS_WAIT_FREE
+
+inline bool QBasicAtomicInt::isReferenceCountingNative()
+{ return true; }
+inline bool QBasicAtomicInt::isReferenceCountingWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_INT_TEST_AND_SET_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT_TEST_AND_SET_IS_WAIT_FREE
+
+inline bool QBasicAtomicInt::isTestAndSetNative()
+{ return true; }
+inline bool QBasicAtomicInt::isTestAndSetWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_INT_FETCH_AND_STORE_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT_FETCH_AND_STORE_IS_WAIT_FREE
+
+inline bool QBasicAtomicInt::isFetchAndStoreNative()
+{ return true; }
+inline bool QBasicAtomicInt::isFetchAndStoreWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_INT_FETCH_AND_ADD_IS_ALWAYS_NATIVE
+
+inline bool QBasicAtomicInt::isFetchAndAddNative()
+{ return true; }
+inline bool QBasicAtomicInt::isFetchAndAddWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_TEST_AND_SET_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_POINTER_TEST_AND_SET_IS_WAIT_FREE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_WAIT_FREE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_ADD_IS_ALWAYS_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddWaitFree()
+{ return false; }
+
+inline bool _q_ia64_fetchadd_immediate(register int value)
+{
+ return value == 1 || value == -1
+ || value == 4 || value == -4
+ || value == 8 || value == -8
+ || value == 16 || value == -16;
+}
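+// The IA-64 fetchadd instruction only supports these immediate increments
+// (+/-1, +/-4, +/-8 and +/-16); larger or non-constant increments need a
+// different code path.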
+
+#if defined(Q_CC_INTEL)
+
+// intrinsics provided by the Intel C++ Compiler
+#include <ia64intrin.h>
+
+inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
+{
+ return static_cast<int>(_InterlockedExchange(&_q_value, newValue));
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelease(int newValue)
+{
+ __memory_barrier();
+ return static_cast<int>(_InterlockedExchange(&_q_value, newValue));
+}
+
+inline bool QBasicAtomicInt::testAndSetRelaxed(int expectedValue, int newValue)
+{
+ register int expectedValueCopy = expectedValue;
+ return (static_cast<int>(_InterlockedCompareExchange(&_q_value,
+ newValue,
+ expectedValueCopy))
+ == expectedValue);
+}
+
+inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
+{
+ register int expectedValueCopy = expectedValue;
+ return (static_cast<int>(_InterlockedCompareExchange_acq(reinterpret_cast<volatile uint *>(&_q_value),
+ newValue,
+ expectedValueCopy))
+ == expectedValue);
+}
+
+inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
+{
+ register int expectedValueCopy = expectedValue;
+ return (static_cast<int>(_InterlockedCompareExchange_rel(reinterpret_cast<volatile uint *>(&_q_value),
+ newValue,
+ expectedValueCopy))
+ == expectedValue);
+}
+
+inline bool QBasicAtomicInt::testAndSetOrdered(int expectedValue, int newValue)
+{
+ __memory_barrier();
+ return testAndSetAcquire(expectedValue, newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
+{
+ if (__builtin_constant_p(valueToAdd)) {
+ if (valueToAdd == 1)
+ return __fetchadd4_acq((unsigned int *)&_q_value, 1);
+ if (valueToAdd == -1)
+ return __fetchadd4_acq((unsigned int *)&_q_value, -1);
+ }
+ return _InterlockedExchangeAdd(&_q_value, valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
+{
+ if (__builtin_constant_p(valueToAdd)) {
+ if (valueToAdd == 1)
+ return __fetchadd4_rel((unsigned int *)&_q_value, 1);
+ if (valueToAdd == -1)
+ return __fetchadd4_rel((unsigned int *)&_q_value, -1);
+ }
+ __memory_barrier();
+ return _InterlockedExchangeAdd(&_q_value, valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddOrdered(int valueToAdd)
+{
+ __memory_barrier();
+ return fetchAndAddAcquire(valueToAdd);
+}
+
+inline bool QBasicAtomicInt::ref()
+{
+ return _InterlockedIncrement(&_q_value) != 0;
+}
+
+inline bool QBasicAtomicInt::deref()
+{
+ return _InterlockedDecrement(&_q_value) != 0;
+}
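+
+// ref() and deref() report whether the counter is non-zero *after* the
+// change, which is exactly what _InterlockedIncrement/_InterlockedDecrement
+// return here.  Typical use is intrusive reference counting; a minimal usage
+// sketch with the public wrapper (hypothetical class, assumes
+// #include <QtCore/QAtomicInt>):
+#if 0
+class Document
+{
+public:
+    Document() : m_ref(1) {}
+    void addRef() { m_ref.ref(); }
+    bool release() { return !m_ref.deref(); } // true once the count hits zero
+private:
+    QAtomicInt m_ref;
+};
+#endif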
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreAcquire(T *newValue)
+{
+ return (T *)_InterlockedExchangePointer(reinterpret_cast<void * volatile*>(&_q_value), newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelease(T *newValue)
+{
+ __memory_barrier();
+ return fetchAndStoreAcquire(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelaxed(T *expectedValue, T *newValue)
+{
+ register T *expectedValueCopy = expectedValue;
+ return (_InterlockedCompareExchangePointer(reinterpret_cast<void * volatile*>(&_q_value),
+ newValue,
+ expectedValueCopy)
+ == expectedValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetAcquire(T *expectedValue, T *newValue)
+{
+ union {
+ volatile void *x;
+ volatile unsigned long *p;
+ };
+ x = &_q_value;
+ register T *expectedValueCopy = expectedValue;
+ return (_InterlockedCompareExchange64_acq(p, quintptr(newValue), quintptr(expectedValueCopy))
+ == quintptr(expectedValue));
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValue, T *newValue)
+{
+ union {
+ volatile void *x;
+ volatile unsigned long *p;
+ };
+ x = &_q_value;
+ register T *expectedValueCopy = expectedValue;
+ return (_InterlockedCompareExchange64_rel(p, quintptr(newValue), quintptr(expectedValueCopy))
+ == quintptr(expectedValue));
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetOrdered(T *expectedValue, T *newValue)
+{
+ __memory_barrier();
+ return testAndSetAcquire(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddAcquire(qptrdiff valueToAdd)
+{
+ return (T *)_InterlockedExchangeAdd64((volatile long *)&_q_value,
+ valueToAdd * sizeof(T));
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelease(qptrdiff valueToAdd)
+{
+ __memory_barrier();
+ return (T *)_InterlockedExchangeAdd64((volatile long *)&_q_value,
+ valueToAdd * sizeof(T));
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddOrdered(qptrdiff valueToAdd)
+{
+ __memory_barrier();
+ return fetchAndAddAcquire(valueToAdd);
+}
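+
+// Note that the pointer variants scale valueToAdd by sizeof(T), so a
+// fetch-and-add on an atomic pointer moves in whole elements, just like
+// ordinary pointer arithmetic.  Illustration (hypothetical snippet, assumes
+// #include <QtCore/QAtomicPointer>):
+#if 0
+static void example()
+{
+    int buffer[8];
+    QAtomicPointer<int> cursor(buffer);
+    int *before = cursor.fetchAndAddRelaxed(3); // returns the old pointer: buffer
+    // cursor now points at buffer + 3, i.e. 3 * sizeof(int) bytes further on
+    (void) before;
+}
+#endif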
+
+#else // !Q_CC_INTEL
+
+# if defined(Q_CC_GNU)
+
+inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
+{
+ int ret;
+ asm volatile("xchg4 %0=%1,%2\n"
+ : "=r" (ret), "+m" (_q_value)
+ : "r" (newValue)
+ : "memory");
+ return ret;
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelease(int newValue)
+{
+ int ret;
+ asm volatile("mf\n"
+ "xchg4 %0=%1,%2\n"
+ : "=r" (ret), "+m" (_q_value)
+ : "r" (newValue)
+ : "memory");
+ return ret;
+}
+
+inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
+{
+ int ret;
+ asm volatile("mov ar.ccv=%2\n"
+ ";;\n"
+ "cmpxchg4.acq %0=%1,%3,ar.ccv\n"
+ : "=r" (ret), "+m" (_q_value)
+ : "r" (expectedValue), "r" (newValue)
+ : "memory");
+ return ret == expectedValue;
+}
+
+inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
+{
+ int ret;
+ asm volatile("mov ar.ccv=%2\n"
+ ";;\n"
+ "cmpxchg4.rel %0=%1,%3,ar.ccv\n"
+ : "=r" (ret), "+m" (_q_value)
+ : "r" (expectedValue), "r" (newValue)
+ : "memory");
+ return ret == expectedValue;
+}
+
+inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
+{
+ int ret;
+
+#if (__GNUC__ >= 4)
+ // We implement a fast fetch-and-add when we can
+ if (__builtin_constant_p(valueToAdd) && _q_ia64_fetchadd_immediate(valueToAdd)) {
+ asm volatile("fetchadd4.acq %0=%1,%2\n"
+ : "=r" (ret), "+m" (_q_value)
+ : "i" (valueToAdd)
+ : "memory");
+ return ret;
+ }
+#endif
+
+ // otherwise, use a loop around test-and-set
+ ret = _q_value;
+ asm volatile("0:\n"
+ " mov r9=%0\n"
+ " mov ar.ccv=%0\n"
+ " add %0=%0, %2\n"
+ " ;;\n"
+ " cmpxchg4.acq %0=%1,%0,ar.ccv\n"
+ " ;;\n"
+ " cmp.ne p6,p0 = %0, r9\n"
+ "(p6) br.dptk 0b\n"
+ "1:\n"
+ : "+r" (ret), "+m" (_q_value)
+ : "r" (valueToAdd)
+ : "r9", "p6", "memory");
+ return ret;
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
+{
+ int ret;
+
+#if (__GNUC__ >= 4)
+ // We implement a fast fetch-and-add when we can
+ if (__builtin_constant_p(valueToAdd) && _q_ia64_fetchadd_immediate(valueToAdd)) {
+ asm volatile("fetchadd4.rel %0=%1,%2\n"
+ : "=r" (ret), "+m" (_q_value)
+ : "i" (valueToAdd)
+ : "memory");
+ return ret;
+ }
+#endif
+
+ // otherwise, use a loop around test-and-set
+ ret = _q_value;
+ asm volatile("0:\n"
+ " mov r9=%0\n"
+ " mov ar.ccv=%0\n"
+ " add %0=%0, %2\n"
+ " ;;\n"
+ " cmpxchg4.rel %0=%1,%0,ar.ccv\n"
+ " ;;\n"
+ " cmp.ne p6,p0 = %0, r9\n"
+ "(p6) br.dptk 0b\n"
+ "1:\n"
+ : "+r" (ret), "+m" (_q_value)
+ : "r" (valueToAdd)
+ : "r9", "p6", "memory");
+ return ret;
+}
+
+inline int QBasicAtomicInt::fetchAndAddOrdered(int valueToAdd)
+{
+ asm volatile("mf" ::: "memory");
+ return fetchAndAddRelease(valueToAdd);
+}
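+
+// The fetch-and-add family above is two-tiered: a single fetchadd4 when the
+// increment is a constant from the allowed list, otherwise a cmpxchg4 retry
+// loop.  The loop has the usual compare-and-swap shape; expressed with GCC's
+// portable builtin instead of IA-64 assembly it would read (hypothetical
+// helper with equivalent semantics, requires GCC >= 4.1):
+#if 0
+static inline int fetch_and_add_via_cas(volatile int *p, int valueToAdd)
+{
+    int old;
+    do {
+        old = *p;                 // read the current value
+        // retry if another CPU changed *p between the read and the swap
+    } while (__sync_val_compare_and_swap(p, old, old + valueToAdd) != old);
+    return old;                   // fetch-and-add returns the previous value
+}
+#endif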
+
+inline bool QBasicAtomicInt::ref()
+{
+ int ret;
+ asm volatile("fetchadd4.acq %0=%1,1\n"
+ : "=r" (ret), "+m" (_q_value)
+ :
+ : "memory");
+ return ret != -1;
+}
+
+inline bool QBasicAtomicInt::deref()
+{
+ int ret;
+ asm volatile("fetchadd4.rel %0=%1,-1\n"
+ : "=r" (ret), "+m" (_q_value)
+ :
+ : "memory");
+ return ret != 1;
+}
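+
+// fetchadd4 returns the value the counter held *before* the update, while
+// ref()/deref() must report whether it is non-zero *after* the update.  With
+// new = old + 1 (respectively old - 1), the checks "ret != -1" and
+// "ret != 1" above are exactly that condition, spelled out below
+// (hypothetical helpers, for illustration only):
+#if 0
+static inline bool ref_result(int oldValue)
+{ return (oldValue + 1) != 0; }   // same condition as: oldValue != -1
+static inline bool deref_result(int oldValue)
+{ return (oldValue - 1) != 0; }   // same condition as: oldValue != 1
+#endif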
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreAcquire(T *newValue)
+{
+ T *ret;
+ asm volatile("xchg8 %0=%1,%2\n"
+ : "=r" (ret), "+m" (_q_value)
+ : "r" (newValue)
+ : "memory");
+ return ret;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelease(T *newValue)
+{
+ T *ret;
+ asm volatile("mf\n"
+ "xchg8 %0=%1,%2\n"
+ : "=r" (ret), "+m" (_q_value)
+ : "r" (newValue)
+ : "memory");
+ return ret;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetAcquire(T *expectedValue, T *newValue)
+{
+ T *ret;
+ asm volatile("mov ar.ccv=%2\n"
+ ";;\n"
+ "cmpxchg8.acq %0=%1,%3,ar.ccv\n"
+ : "=r" (ret), "+m" (_q_value)
+ : "r" (expectedValue), "r" (newValue)
+ : "memory");
+ return ret == expectedValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValue, T *newValue)
+{
+ T *ret;
+ asm volatile("mov ar.ccv=%2\n"
+ ";;\n"
+ "cmpxchg8.rel %0=%1,%3,ar.ccv\n"
+ : "=r" (ret), "+m" (_q_value)
+ : "r" (expectedValue), "r" (newValue)
+ : "memory");
+ return ret == expectedValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddAcquire(qptrdiff valueToAdd)
+{
+ T *ret;
+
+#if (__GNUC__ >= 4)
+ // We implement a fast fetch-and-add when we can
+ if (__builtin_constant_p(valueToAdd) && _q_ia64_fetchadd_immediate(valueToAdd * sizeof(T))) {
+ asm volatile("fetchadd8.acq %0=%1,%2\n"
+ : "=r" (ret), "+m" (_q_value)
+ : "i" (valueToAdd * sizeof(T))
+ : "memory");
+ return ret;
+ }
+#endif
+
+ // otherwise, use a loop around test-and-set
+ ret = _q_value;
+ asm volatile("0:\n"
+ " mov r9=%0\n"
+ " mov ar.ccv=%0\n"
+ " add %0=%0, %2\n"
+ " ;;\n"
+ " cmpxchg8.acq %0=%1,%0,ar.ccv\n"
+ " ;;\n"
+ " cmp.ne p6,p0 = %0, r9\n"
+ "(p6) br.dptk 0b\n"
+ "1:\n"
+ : "+r" (ret), "+m" (_q_value)
+ : "r" (valueToAdd * sizeof(T))
+ : "r9", "p6", "memory");
+ return ret;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelease(qptrdiff valueToAdd)
+{
+ T *ret;
+
+#if (__GNUC__ >= 4)
+ // We implement a fast fetch-and-add when we can
+ if (__builtin_constant_p(valueToAdd) && _q_ia64_fetchadd_immediate(valueToAdd * sizeof(T))) {
+ asm volatile("fetchadd8.rel %0=%1,%2\n"
+ : "=r" (ret), "+m" (_q_value)
+ : "i" (valueToAdd * sizeof(T))
+ : "memory");
+ return ret;
+ }
+#endif
+
+ // otherwise, use a loop around test-and-set
+ ret = _q_value;
+ asm volatile("0:\n"
+ " mov r9=%0\n"
+ " mov ar.ccv=%0\n"
+ " add %0=%0, %2\n"
+ " ;;\n"
+ " cmpxchg8.rel %0=%1,%0,ar.ccv\n"
+ " ;;\n"
+ " cmp.ne p6,p0 = %0, r9\n"
+ "(p6) br.dptk 0b\n"
+ "1:\n"
+ : "+r" (ret), "+m" (_q_value)
+ : "r" (valueToAdd * sizeof(T))
+ : "r9", "p6", "memory");
+ return ret;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddOrdered(qptrdiff valueToAdd)
+{
+ asm volatile("mf" ::: "memory");
+ return fetchAndAddRelease(valueToAdd);
+}
+
+#elif defined Q_CC_HPACC
+
+QT_BEGIN_INCLUDE_NAMESPACE
+#include <ia64/sys/inline.h>
+QT_END_INCLUDE_NAMESPACE
+
+#define FENCE (_Asm_fence)(_UP_CALL_FENCE | _UP_SYS_FENCE | _DOWN_CALL_FENCE | _DOWN_SYS_FENCE)
+
+inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
+{
+ return _Asm_xchg((_Asm_sz)_SZ_W, &_q_value, (unsigned)newValue,
+ (_Asm_ldhint)_LDHINT_NONE, FENCE);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelease(int newValue)
+{
+ _Asm_mf(FENCE);
+ return _Asm_xchg((_Asm_sz)_SZ_W, &_q_value, (unsigned)newValue,
+ (_Asm_ldhint)_LDHINT_NONE, FENCE);
+}
+
+inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
+{
+ _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (unsigned)expectedValue, FENCE);
+ int ret = _Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_ACQ,
+ &_q_value, (unsigned)newValue, (_Asm_ldhint)_LDHINT_NONE);
+ return ret == expectedValue;
+}
+
+inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
+{
+ _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (unsigned)expectedValue, FENCE);
+ int ret = _Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_REL,
+ &_q_value, newValue, (_Asm_ldhint)_LDHINT_NONE);
+ return ret == expectedValue;
+}
+
+inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
+{
+ if (valueToAdd == 1)
+ return _Asm_fetchadd((_Asm_fasz)_FASZ_W, (_Asm_sem)_SEM_ACQ,
+ &_q_value, 1, (_Asm_ldhint)_LDHINT_NONE, FENCE);
+ else if (valueToAdd == -1)
+ return _Asm_fetchadd((_Asm_fasz)_FASZ_W, (_Asm_sem)_SEM_ACQ,
+ &_q_value, -1, (_Asm_ldhint)_LDHINT_NONE, FENCE);
+
+ // implement the test-and-set loop
+ register int old, ret;
+ do {
+ old = _q_value;
+ _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (unsigned)old, FENCE);
+ ret = _Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_ACQ,
+ &_q_value, old + valueToAdd, (_Asm_ldhint)_LDHINT_NONE);
+ } while (ret != old);
+ return old;
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
+{
+ if (valueToAdd == 1)
+ return _Asm_fetchadd((_Asm_fasz)_FASZ_W, (_Asm_sem)_SEM_REL,
+ &_q_value, 1, (_Asm_ldhint)_LDHINT_NONE, FENCE);
+ else if (valueToAdd == -1)
+ return _Asm_fetchadd((_Asm_fasz)_FASZ_W, (_Asm_sem)_SEM_REL,
+ &_q_value, -1, (_Asm_ldhint)_LDHINT_NONE, FENCE);
+
+ // implement the test-and-set loop
+ register int old, ret;
+ do {
+ old = _q_value;
+ _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (unsigned)old, FENCE);
+ ret = _Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_REL,
+ &_q_value, old + valueToAdd, (_Asm_ldhint)_LDHINT_NONE);
+ } while (ret != old);
+ return old;
+}
+
+inline int QBasicAtomicInt::fetchAndAddOrdered(int valueToAdd)
+{
+ _Asm_mf(FENCE);
+ return fetchAndAddAcquire(valueToAdd);
+}
+
+inline bool QBasicAtomicInt::ref()
+{
+ return (int)_Asm_fetchadd((_Asm_fasz)_FASZ_W, (_Asm_sem)_SEM_ACQ,
+ &_q_value, 1, (_Asm_ldhint)_LDHINT_NONE, FENCE) != -1;
+}
+
+inline bool QBasicAtomicInt::deref()
+{
+ return (int)_Asm_fetchadd((_Asm_fasz)_FASZ_W, (_Asm_sem)_SEM_REL,
+ &_q_value, -1, (_Asm_ldhint)_LDHINT_NONE, FENCE) != 1;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreAcquire(T *newValue)
+{
+#ifdef __LP64__
+ return (T *)_Asm_xchg((_Asm_sz)_SZ_D, &_q_value, (quint64)newValue,
+ (_Asm_ldhint)_LDHINT_NONE, FENCE);
+#else
+ return (T *)_Asm_xchg((_Asm_sz)_SZ_W, &_q_value, (quint32)newValue,
+ (_Asm_ldhint)_LDHINT_NONE, FENCE);
+#endif
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelease(T *newValue)
+{
+ _Asm_mf(FENCE);
+ return fetchAndStoreAcquire(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetAcquire(T *expectedValue, T *newValue)
+{
+#ifdef __LP64__
+ _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint64)expectedValue, FENCE);
+ T *ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_D, (_Asm_sem)_SEM_ACQ,
+ &_q_value, (quint64)newValue, (_Asm_ldhint)_LDHINT_NONE);
+#else
+ _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint32)expectedValue, FENCE);
+ T *ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_ACQ,
+ &_q_value, (quint32)newValue, (_Asm_ldhint)_LDHINT_NONE);
+#endif
+ return ret == expectedValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValue, T *newValue)
+{
+#ifdef __LP64__
+ _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint64)expectedValue, FENCE);
+ T *ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_D, (_Asm_sem)_SEM_REL,
+ &_q_value, (quint64)newValue, (_Asm_ldhint)_LDHINT_NONE);
+#else
+ _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint32)expectedValue, FENCE);
+ T *ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_REL,
+ &_q_value, (quint32)newValue, (_Asm_ldhint)_LDHINT_NONE);
+#endif
+ return ret == expectedValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddAcquire(qptrdiff valueToAdd)
+{
+ // implement the test-and-set loop
+ register T *old, *ret;
+ do {
+ old = _q_value;
+#ifdef __LP64__
+ _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint64)old, FENCE);
+ ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_D, (_Asm_sem)_SEM_ACQ,
+ &_q_value, (quint64)(old + valueToAdd),
+ (_Asm_ldhint)_LDHINT_NONE);
+#else
+ _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint32)old, FENCE);
+ ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_ACQ,
+ &_q_value, (quint32)(old + valueToAdd),
+ (_Asm_ldhint)_LDHINT_NONE);
+#endif
+ } while (old != ret);
+ return old;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelease(qptrdiff valueToAdd)
+{
+ // implement the test-and-set loop
+ register T *old, *ret;
+ do {
+ old = _q_value;
+#ifdef __LP64__
+ _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint64)old, FENCE);
+ ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_D, (_Asm_sem)_SEM_REL,
+ &_q_value, (quint64)(old + valueToAdd),
+ (_Asm_ldhint)_LDHINT_NONE);
+#else
+ _Asm_mov_to_ar((_Asm_app_reg)_AREG_CCV, (quint32)old, FENCE);
+ ret = (T *)_Asm_cmpxchg((_Asm_sz)_SZ_W, (_Asm_sem)_SEM_REL,
+ &_q_value, (quint32)(old + valueToAdd),
+ (_Asm_ldhint)_LDHINT_NONE);
+#endif
+ } while (old != ret);
+ return old;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddOrdered(qptrdiff valueToAdd)
+{
+ _Asm_mf(FENCE);
+ return fetchAndAddAcquire(valueToAdd);
+}
+
+#else
+
+extern "C" {
+ Q_CORE_EXPORT int q_atomic_test_and_set_int(volatile int *ptr, int expected, int newval);
+ Q_CORE_EXPORT int q_atomic_test_and_set_ptr(volatile void *ptr, void *expected, void *newval);
+} // extern "C"
+
+#endif
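+
+// For any other compiler the header only declares the two exported
+// test-and-set primitives and leaves their definitions to an out-of-line,
+// assembler-backed implementation; the remaining operations are then layered
+// on top of them.  One plausible shape for that layer (hypothetical sketch,
+// mirroring how the other fallback ports in this commit are written):
+#if 0
+inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
+{
+    return q_atomic_test_and_set_int(&_q_value, expectedValue, newValue) != 0;
+}
+#endif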
+
+inline bool QBasicAtomicInt::testAndSetRelaxed(int expectedValue, int newValue)
+{
+ return testAndSetAcquire(expectedValue, newValue);
+}
+
+inline bool QBasicAtomicInt::testAndSetOrdered(int expectedValue, int newValue)
+{
+ return testAndSetAcquire(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelaxed(T *expectedValue, T *newValue)
+{
+ return testAndSetAcquire(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetOrdered(T *expectedValue, T *newValue)
+{
+ return testAndSetAcquire(expectedValue, newValue);
+}
+
+#endif // Q_CC_INTEL
+
+inline int QBasicAtomicInt::fetchAndStoreRelaxed(int newValue)
+{
+ return fetchAndStoreAcquire(newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreOrdered(int newValue)
+{
+ return fetchAndStoreRelease(newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelaxed(int valueToAdd)
+{
+ return fetchAndAddAcquire(valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelaxed(T *newValue)
+{
+ return fetchAndStoreAcquire(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreOrdered(T *newValue)
+{
+ return fetchAndStoreRelaxed(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelaxed(qptrdiff valueToAdd)
+{
+ return fetchAndAddAcquire(valueToAdd);
+}
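+
+// The four orderings used throughout this header follow the acquire/release
+// model: Relaxed imposes no ordering, Acquire/Release order the operation
+// against later/earlier memory accesses, and Ordered acts as a full barrier.
+// A rough analogy in C++11 terms, for orientation only (Qt 4 predates
+// std::atomic):
+#if 0
+#include <atomic>
+static void analogy(std::atomic<int> &v, int n)
+{
+    v.exchange(n, std::memory_order_relaxed); // ~ fetchAndStoreRelaxed
+    v.exchange(n, std::memory_order_acquire); // ~ fetchAndStoreAcquire
+    v.exchange(n, std::memory_order_release); // ~ fetchAndStoreRelease
+    v.exchange(n, std::memory_order_seq_cst); // ~ fetchAndStoreOrdered
+}
+#endif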
+
+QT_END_NAMESPACE
+
+QT_END_HEADER
+
+#endif // QATOMIC_IA64_H
diff --git a/src/corelib/arch/qatomic_integrity.h b/src/corelib/arch/qatomic_integrity.h
new file mode 100644
index 0000000000..6563903a25
--- /dev/null
+++ b/src/corelib/arch/qatomic_integrity.h
@@ -0,0 +1,289 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QATOMIC_INTEGRITY_H
+#define QATOMIC_INTEGRITY_H
+
+#include <INTEGRITY.h>
+
+QT_BEGIN_HEADER
+
+QT_BEGIN_NAMESPACE
+
+#define qt_i2addr(a) reinterpret_cast<Address *>(const_cast<int *>(a))
+#define qt_p2addr(a) reinterpret_cast<Address *>(const_cast<void *>(a))
+#define qt_addr(a) reinterpret_cast<Address>(a)
+
+
+#define Q_ATOMIC_INT_REFERENCE_COUNTING_IS_NOT_NATIVE
+
+inline bool QBasicAtomicInt::isReferenceCountingNative()
+{ return false; }
+inline bool QBasicAtomicInt::isReferenceCountingWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_TEST_AND_SET_IS_NOT_NATIVE
+
+inline bool QBasicAtomicInt::isTestAndSetNative()
+{ return true; }
+inline bool QBasicAtomicInt::isTestAndSetWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_INT_FETCH_AND_STORE_IS_NOT_NATIVE
+
+inline bool QBasicAtomicInt::isFetchAndStoreNative()
+{ return true; }
+inline bool QBasicAtomicInt::isFetchAndStoreWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_INT_FETCH_AND_ADD_IS_NOT_NATIVE
+
+inline bool QBasicAtomicInt::isFetchAndAddNative()
+{ return true; }
+inline bool QBasicAtomicInt::isFetchAndAddWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_POINTER_TEST_AND_SET_IS_NOT_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_NOT_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_ADD_IS_NOT_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddWaitFree()
+{ return true; }
+
+// Reference counting
+
+inline bool QBasicAtomicInt::ref()
+{
+ int oldval;
+ AtomicModify(qt_i2addr(&_q_value), qt_i2addr(&oldval), 0, 1);
+    // ref() reports whether the counter is non-zero after the increment
+    return _q_value != 0;
+}
+
+inline bool QBasicAtomicInt::deref()
+{
+ int oldval;
+ AtomicModify(qt_i2addr(&_q_value), qt_i2addr(&oldval), 0, -1U);
+ return _q_value != 0;
+}
+
+// Test and set for integers
+
+inline bool QBasicAtomicInt::testAndSetOrdered(int expectedValue, int newValue)
+{
+ return TestAndSet(qt_i2addr(&_q_value), expectedValue, newValue) == Success;
+}
+
+inline bool QBasicAtomicInt::testAndSetRelaxed(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+// Fetch and store for integers
+
+inline int QBasicAtomicInt::fetchAndStoreOrdered(int newValue)
+{
+ int old_val;
+ do {
+ old_val = _q_value;
+ } while (TestAndSet(qt_i2addr(&_q_value), old_val, newValue) != Success);
+ return old_val;
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelaxed(int newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelease(int newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+// Fetch and add for integers
+
+inline int QBasicAtomicInt::fetchAndAddOrdered(int valueToAdd)
+{
+ int old_val;
+ do {
+ old_val = _q_value;
+ } while (TestAndSet(qt_i2addr(&_q_value), old_val, old_val + valueToAdd) != Success);
+ return old_val;
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelaxed(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
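+
+// The fetch-and-store and fetch-and-add operations above are all built from
+// the kernel's TestAndSet call with the standard compare-and-swap retry
+// loop: snapshot the value, compute the replacement, retry if another thread
+// won the race.  The common shape, factored out (hypothetical helper, not
+// part of this header):
+#if 0
+static inline int fetch_and_add_via_tas(volatile int *value, int valueToAdd)
+{
+    int old;
+    do {
+        old = *value;  // snapshot; TestAndSet succeeds only if it still holds
+    } while (TestAndSet(qt_i2addr(value), old, old + valueToAdd) != Success);
+    return old;
+}
+#endif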
+
+// Test and set for pointers
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetOrdered(T *expectedValue, T *newValue)
+{
+ return TestAndSet((Address*)&_q_value, qt_addr(expectedValue), qt_addr(newValue)) == Success;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelaxed(T *expectedValue, T *newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetAcquire(T *expectedValue, T *newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValue, T *newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+// Fetch and store for pointers
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreOrdered(T *newValue)
+{
+ Address old_val;
+ do {
+        // snapshot the current pointer value; the swap below succeeds only
+        // if it is still unchanged when TestAndSet runs
+        old_val = qt_addr(_q_value);
+ } while (TestAndSet(reinterpret_cast<Address *>(const_cast<T **>(&_q_value)), old_val, qt_addr(newValue)) != Success);
+ return reinterpret_cast<T *>(old_val);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelaxed(T *newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreAcquire(T *newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelease(T *newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+// Fetch and add for pointers
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddOrdered(qptrdiff valueToAdd)
+{
+ AtomicModify(qt_p2addr(&_q_value), qt_addr(_q_value), qt_addr(_q_value) + valueToAdd * sizeof(T));
+ return _q_value;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelaxed(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddAcquire(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelease(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+QT_END_NAMESPACE
+
+QT_END_HEADER
+
+#endif // QATOMIC_INTEGRITY_H
+
diff --git a/src/corelib/arch/qatomic_macosx.h b/src/corelib/arch/qatomic_macosx.h
new file mode 100644
index 0000000000..be4501f4d2
--- /dev/null
+++ b/src/corelib/arch/qatomic_macosx.h
@@ -0,0 +1,57 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QATOMIC_MACOSX_H
+#define QATOMIC_MACOSX_H
+
+QT_BEGIN_HEADER
+
+#if defined(__x86_64__)
+# include <QtCore/qatomic_x86_64.h>
+#elif defined(__i386__)
+# include <QtCore/qatomic_i386.h>
+#else // !__x86_64 && !__i386__
+# include <QtCore/qatomic_powerpc.h>
+#endif // !__x86_64__ && !__i386__
+
+QT_END_HEADER
+
+#endif // QATOMIC_MACOSX_H
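+
+// This header contains no atomic code of its own; it forwards to the x86-64,
+// i386 or PowerPC implementation based on the compiler's architecture
+// predefines, so universal (multi-architecture) builds pick the right
+// backend for each slice.  A quick way to see which backend a given build
+// resolves to (hypothetical snippet; #pragma message is a compiler
+// extension):
+#if 0
+#if defined(__x86_64__)
+#  pragma message("qatomic_macosx.h forwards to qatomic_x86_64.h")
+#elif defined(__i386__)
+#  pragma message("qatomic_macosx.h forwards to qatomic_i386.h")
+#else
+#  pragma message("qatomic_macosx.h forwards to qatomic_powerpc.h")
+#endif
+#endif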
diff --git a/src/corelib/arch/qatomic_mips.h b/src/corelib/arch/qatomic_mips.h
new file mode 100644
index 0000000000..6f2607c524
--- /dev/null
+++ b/src/corelib/arch/qatomic_mips.h
@@ -0,0 +1,892 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QATOMIC_MIPS_H
+#define QATOMIC_MIPS_H
+
+QT_BEGIN_HEADER
+
+QT_BEGIN_NAMESPACE
+
+#define Q_ATOMIC_INT_REFERENCE_COUNTING_IS_ALWAYS_NATIVE
+
+inline bool QBasicAtomicInt::isReferenceCountingNative()
+{ return true; }
+inline bool QBasicAtomicInt::isReferenceCountingWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_TEST_AND_SET_IS_ALWAYS_NATIVE
+
+inline bool QBasicAtomicInt::isTestAndSetNative()
+{ return true; }
+inline bool QBasicAtomicInt::isTestAndSetWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_FETCH_AND_STORE_IS_ALWAYS_NATIVE
+
+inline bool QBasicAtomicInt::isFetchAndStoreNative()
+{ return true; }
+inline bool QBasicAtomicInt::isFetchAndStoreWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_FETCH_AND_ADD_IS_ALWAYS_NATIVE
+
+inline bool QBasicAtomicInt::isFetchAndAddNative()
+{ return true; }
+inline bool QBasicAtomicInt::isFetchAndAddWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_TEST_AND_SET_IS_ALWAYS_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_ALWAYS_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_ADD_IS_ALWAYS_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddWaitFree()
+{ return false; }
+
+#if defined(Q_CC_GNU) && !defined(Q_OS_IRIX)
+
+#if _MIPS_SIM == _ABIO32
+#define SET_MIPS2 ".set mips2\n\t"
+#else
+#define SET_MIPS2
+#endif
+
+inline bool QBasicAtomicInt::ref()
+{
+ register int originalValue;
+ register int newValue;
+ asm volatile(".set push\n"
+ SET_MIPS2
+ "0:\n"
+ "ll %[originalValue], %[_q_value]\n"
+ "addiu %[newValue], %[originalValue], %[one]\n"
+ "sc %[newValue], %[_q_value]\n"
+ "beqz %[newValue], 0b\n"
+ "nop\n"
+ ".set pop\n"
+ : [originalValue] "=&r" (originalValue),
+ [_q_value] "+m" (_q_value),
+ [newValue] "=&r" (newValue)
+ : [one] "i" (1)
+ : "cc", "memory");
+ return originalValue != -1;
+}
+
+inline bool QBasicAtomicInt::deref()
+{
+ register int originalValue;
+ register int newValue;
+ asm volatile(".set push\n"
+ SET_MIPS2
+ "0:\n"
+ "ll %[originalValue], %[_q_value]\n"
+ "addiu %[newValue], %[originalValue], %[minusOne]\n"
+ "sc %[newValue], %[_q_value]\n"
+ "beqz %[newValue], 0b\n"
+ "nop\n"
+ ".set pop\n"
+ : [originalValue] "=&r" (originalValue),
+ [_q_value] "+m" (_q_value),
+ [newValue] "=&r" (newValue)
+ : [minusOne] "i" (-1)
+ : "cc", "memory");
+ return originalValue != 1;
+}
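+
+// MIPS has no single-instruction atomic add; ll (load-linked) and sc
+// (store-conditional) bracket the update instead: sc writes back only if
+// nothing else touched the word since the ll, leaving 0 in its register on
+// failure, which the beqz sends back around the loop.  The same logic in C
+// (hypothetical helper; a GCC builtin stands in for the ll/sc pair):
+#if 0
+static inline bool ref_via_llsc(volatile int *value)
+{
+    int old;
+    do {
+        old = *value;                        // "ll": read the current value
+        // "sc": store old + 1 only if *value is still equal to old
+    } while (!__sync_bool_compare_and_swap(value, old, old + 1));
+    return old != -1;                        // new value (old + 1) is non-zero
+}
+#endif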
+
+inline bool QBasicAtomicInt::testAndSetRelaxed(int expectedValue, int newValue)
+{
+ register int result;
+ register int tempValue;
+ asm volatile(".set push\n"
+ SET_MIPS2
+ "0:\n"
+ "ll %[result], %[_q_value]\n"
+ "xor %[result], %[result], %[expectedValue]\n"
+ "bnez %[result], 0f\n"
+ "nop\n"
+ "move %[tempValue], %[newValue]\n"
+ "sc %[tempValue], %[_q_value]\n"
+ "beqz %[tempValue], 0b\n"
+ "nop\n"
+ "0:\n"
+ ".set pop\n"
+ : [result] "=&r" (result),
+ [tempValue] "=&r" (tempValue),
+ [_q_value] "+m" (_q_value)
+ : [expectedValue] "r" (expectedValue),
+ [newValue] "r" (newValue)
+ : "cc", "memory");
+ return result == 0;
+}
+
+inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
+{
+ register int result;
+ register int tempValue;
+ asm volatile(".set push\n"
+ SET_MIPS2
+ "0:\n"
+ "ll %[result], %[_q_value]\n"
+ "xor %[result], %[result], %[expectedValue]\n"
+ "bnez %[result], 0f\n"
+ "nop\n"
+ "move %[tempValue], %[newValue]\n"
+ "sc %[tempValue], %[_q_value]\n"
+ "beqz %[tempValue], 0b\n"
+ "nop\n"
+ "sync\n"
+ "0:\n"
+ ".set pop\n"
+ : [result] "=&r" (result),
+ [tempValue] "=&r" (tempValue),
+ [_q_value] "+m" (_q_value)
+ : [expectedValue] "r" (expectedValue),
+ [newValue] "r" (newValue)
+ : "cc", "memory");
+ return result == 0;
+}
+
+inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
+{
+ register int result;
+ register int tempValue;
+ asm volatile(".set push\n"
+ SET_MIPS2
+ "sync\n"
+ "0:\n"
+ "ll %[result], %[_q_value]\n"
+ "xor %[result], %[result], %[expectedValue]\n"
+ "bnez %[result], 0f\n"
+ "nop\n"
+ "move %[tempValue], %[newValue]\n"
+ "sc %[tempValue], %[_q_value]\n"
+ "beqz %[tempValue], 0b\n"
+ "nop\n"
+ "0:\n"
+ ".set pop\n"
+ : [result] "=&r" (result),
+ [tempValue] "=&r" (tempValue),
+ [_q_value] "+m" (_q_value)
+ : [expectedValue] "r" (expectedValue),
+ [newValue] "r" (newValue)
+ : "cc", "memory");
+ return result == 0;
+}
+
+inline bool QBasicAtomicInt::testAndSetOrdered(int expectedValue, int newValue)
+{
+ return testAndSetAcquire(expectedValue, newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelaxed(int newValue)
+{
+ register int originalValue;
+ register int tempValue;
+ asm volatile(".set push\n"
+ SET_MIPS2
+ "0:\n"
+ "ll %[originalValue], %[_q_value]\n"
+ "move %[tempValue], %[newValue]\n"
+ "sc %[tempValue], %[_q_value]\n"
+ "beqz %[tempValue], 0b\n"
+ "nop\n"
+ ".set pop\n"
+ : [originalValue] "=&r" (originalValue),
+ [tempValue] "=&r" (tempValue),
+ [_q_value] "+m" (_q_value)
+ : [newValue] "r" (newValue)
+ : "cc", "memory");
+ return originalValue;
+}
+
+inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
+{
+ register int originalValue;
+ register int tempValue;
+ asm volatile(".set push\n"
+ SET_MIPS2
+ "0:\n"
+ "ll %[originalValue], %[_q_value]\n"
+ "move %[tempValue], %[newValue]\n"
+ "sc %[tempValue], %[_q_value]\n"
+ "beqz %[tempValue], 0b\n"
+ "nop\n"
+ "sync\n"
+ ".set pop\n"
+ : [originalValue] "=&r" (originalValue),
+ [tempValue] "=&r" (tempValue),
+ [_q_value] "+m" (_q_value)
+ : [newValue] "r" (newValue)
+ : "cc", "memory");
+ return originalValue;
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelease(int newValue)
+{
+ register int originalValue;
+ register int tempValue;
+ asm volatile(".set push\n"
+ SET_MIPS2
+ "sync\n"
+ "0:\n"
+ "ll %[originalValue], %[_q_value]\n"
+ "move %[tempValue], %[newValue]\n"
+ "sc %[tempValue], %[_q_value]\n"
+ "beqz %[tempValue], 0b\n"
+ "nop\n"
+ ".set pop\n"
+ : [originalValue] "=&r" (originalValue),
+ [tempValue] "=&r" (tempValue),
+ [_q_value] "+m" (_q_value)
+ : [newValue] "r" (newValue)
+ : "cc", "memory");
+ return originalValue;
+}
+
+inline int QBasicAtomicInt::fetchAndStoreOrdered(int newValue)
+{
+ return fetchAndStoreAcquire(newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelaxed(int valueToAdd)
+{
+ register int originalValue;
+ register int newValue;
+ asm volatile(".set push\n"
+ SET_MIPS2
+ "0:\n"
+ "ll %[originalValue], %[_q_value]\n"
+ "addu %[newValue], %[originalValue], %[valueToAdd]\n"
+ "sc %[newValue], %[_q_value]\n"
+ "beqz %[newValue], 0b\n"
+ "nop\n"
+ ".set pop\n"
+ : [originalValue] "=&r" (originalValue),
+ [_q_value] "+m" (_q_value),
+ [newValue] "=&r" (newValue)
+ : [valueToAdd] "r" (valueToAdd)
+ : "cc", "memory");
+ return originalValue;
+}
+
+inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
+{
+ register int originalValue;
+ register int newValue;
+ asm volatile(".set push\n"
+ SET_MIPS2
+ "0:\n"
+ "ll %[originalValue], %[_q_value]\n"
+ "addu %[newValue], %[originalValue], %[valueToAdd]\n"
+ "sc %[newValue], %[_q_value]\n"
+ "beqz %[newValue], 0b\n"
+ "nop\n"
+ "sync\n"
+ ".set pop\n"
+ : [originalValue] "=&r" (originalValue),
+ [_q_value] "+m" (_q_value),
+ [newValue] "=&r" (newValue)
+ : [valueToAdd] "r" (valueToAdd)
+ : "cc", "memory");
+ return originalValue;
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
+{
+ register int originalValue;
+ register int newValue;
+ asm volatile(".set push\n"
+ SET_MIPS2
+ "sync\n"
+ "0:\n"
+ "ll %[originalValue], %[_q_value]\n"
+ "addu %[newValue], %[originalValue], %[valueToAdd]\n"
+ "sc %[newValue], %[_q_value]\n"
+ "beqz %[newValue], 0b\n"
+ "nop\n"
+ ".set pop\n"
+ : [originalValue] "=&r" (originalValue),
+ [_q_value] "+m" (_q_value),
+ [newValue] "=&r" (newValue)
+ : [valueToAdd] "r" (valueToAdd)
+ : "cc", "memory");
+ return originalValue;
+}
+
+inline int QBasicAtomicInt::fetchAndAddOrdered(int valueToAdd)
+{
+ return fetchAndAddAcquire(valueToAdd);
+}
+
+#if defined(__LP64__)
+# define LLP "lld"
+# define SCP "scd"
+#else
+# define LLP "ll"
+# define SCP "sc"
+#endif
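+
+// The pointer operations below reuse the integer sequences, but on an LP64
+// build the pointer is a doubleword, so ll/sc must become lld/scd; the two
+// macros splice the right mnemonic into the asm strings through
+// string-literal concatenation.  The size assumption behind them, stated as
+// a pre-C++11 compile-time check (hypothetical, for illustration only):
+#if 0
+#ifdef __LP64__
+typedef char assert_ptr_is_doubleword[sizeof(void *) == 8 ? 1 : -1];
+#else
+typedef char assert_ptr_is_word[sizeof(void *) == 4 ? 1 : -1];
+#endif
+#endif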
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelaxed(T *expectedValue, T *newValue)
+{
+ register T *result;
+ register T *tempValue;
+ asm volatile(".set push\n"
+ SET_MIPS2
+ "0:\n"
+ LLP" %[result], %[_q_value]\n"
+ "xor %[result], %[result], %[expectedValue]\n"
+ "bnez %[result], 0f\n"
+ "nop\n"
+ "move %[tempValue], %[newValue]\n"
+ SCP" %[tempValue], %[_q_value]\n"
+ "beqz %[tempValue], 0b\n"
+ "nop\n"
+ "0:\n"
+ ".set pop\n"
+ : [result] "=&r" (result),
+ [tempValue] "=&r" (tempValue),
+ [_q_value] "+m" (_q_value)
+ : [expectedValue] "r" (expectedValue),
+ [newValue] "r" (newValue)
+ : "cc", "memory");
+ return result == 0;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetAcquire(T *expectedValue, T *newValue)
+{
+ register T *result;
+ register T *tempValue;
+ asm volatile(".set push\n"
+ SET_MIPS2
+ "0:\n"
+ LLP" %[result], %[_q_value]\n"
+ "xor %[result], %[result], %[expectedValue]\n"
+ "bnez %[result], 0f\n"
+ "nop\n"
+ "move %[tempValue], %[newValue]\n"
+ SCP" %[tempValue], %[_q_value]\n"
+ "beqz %[tempValue], 0b\n"
+ "nop\n"
+ "sync\n"
+ "0:\n"
+ ".set pop\n"
+ : [result] "=&r" (result),
+ [tempValue] "=&r" (tempValue),
+ [_q_value] "+m" (_q_value)
+ : [expectedValue] "r" (expectedValue),
+ [newValue] "r" (newValue)
+ : "cc", "memory");
+ return result == 0;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValue, T *newValue)
+{
+ register T *result;
+ register T *tempValue;
+ asm volatile(".set push\n"
+ SET_MIPS2
+ "sync\n"
+ "0:\n"
+ LLP" %[result], %[_q_value]\n"
+ "xor %[result], %[result], %[expectedValue]\n"
+ "bnez %[result], 0f\n"
+ "nop\n"
+ "move %[tempValue], %[newValue]\n"
+ SCP" %[tempValue], %[_q_value]\n"
+ "beqz %[tempValue], 0b\n"
+ "nop\n"
+ "0:\n"
+ ".set pop\n"
+ : [result] "=&r" (result),
+ [tempValue] "=&r" (tempValue),
+ [_q_value] "+m" (_q_value)
+ : [expectedValue] "r" (expectedValue),
+ [newValue] "r" (newValue)
+ : "cc", "memory");
+ return result == 0;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetOrdered(T *expectedValue, T *newValue)
+{
+ return testAndSetAcquire(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelaxed(T *newValue)
+{
+ register T *originalValue;
+ register T *tempValue;
+ asm volatile(".set push\n"
+ SET_MIPS2
+ "0:\n"
+ LLP" %[originalValue], %[_q_value]\n"
+ "move %[tempValue], %[newValue]\n"
+ SCP" %[tempValue], %[_q_value]\n"
+ "beqz %[tempValue], 0b\n"
+ "nop\n"
+ ".set pop\n"
+ : [originalValue] "=&r" (originalValue),
+ [tempValue] "=&r" (tempValue),
+ [_q_value] "+m" (_q_value)
+ : [newValue] "r" (newValue)
+ : "cc", "memory");
+ return originalValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreAcquire(T *newValue)
+{
+ register T *originalValue;
+ register T *tempValue;
+ asm volatile(".set push\n"
+ SET_MIPS2
+ "0:\n"
+ LLP" %[originalValue], %[_q_value]\n"
+ "move %[tempValue], %[newValue]\n"
+ SCP" %[tempValue], %[_q_value]\n"
+ "beqz %[tempValue], 0b\n"
+ "nop\n"
+ "sync\n"
+ ".set pop\n"
+ : [originalValue] "=&r" (originalValue),
+ [tempValue] "=&r" (tempValue),
+ [_q_value] "+m" (_q_value)
+ : [newValue] "r" (newValue)
+ : "cc", "memory");
+ return originalValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelease(T *newValue)
+{
+ register T *originalValue;
+ register T *tempValue;
+ asm volatile(".set push\n"
+ SET_MIPS2
+ "sync\n"
+ "0:\n"
+ LLP" %[originalValue], %[_q_value]\n"
+ "move %[tempValue], %[newValue]\n"
+ SCP" %[tempValue], %[_q_value]\n"
+ "beqz %[tempValue], 0b\n"
+ "nop\n"
+ ".set pop\n"
+ : [originalValue] "=&r" (originalValue),
+ [tempValue] "=&r" (tempValue),
+ [_q_value] "+m" (_q_value)
+ : [newValue] "r" (newValue)
+ : "cc", "memory");
+ return originalValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreOrdered(T *newValue)
+{
+ return fetchAndStoreAcquire(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelaxed(qptrdiff valueToAdd)
+{
+ register T *originalValue;
+ register T *newValue;
+ asm volatile(".set push\n"
+ SET_MIPS2
+ "0:\n"
+ LLP" %[originalValue], %[_q_value]\n"
+ "addu %[newValue], %[originalValue], %[valueToAdd]\n"
+ SCP" %[newValue], %[_q_value]\n"
+ "beqz %[newValue], 0b\n"
+ "nop\n"
+ ".set pop\n"
+ : [originalValue] "=&r" (originalValue),
+ [_q_value] "+m" (_q_value),
+ [newValue] "=&r" (newValue)
+ : [valueToAdd] "r" (valueToAdd * sizeof(T))
+ : "cc", "memory");
+ return originalValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddAcquire(qptrdiff valueToAdd)
+{
+ register T *originalValue;
+ register T *newValue;
+ asm volatile(".set push\n"
+ SET_MIPS2
+ "0:\n"
+ LLP" %[originalValue], %[_q_value]\n"
+ "addu %[newValue], %[originalValue], %[valueToAdd]\n"
+ SCP" %[newValue], %[_q_value]\n"
+ "beqz %[newValue], 0b\n"
+ "nop\n"
+ "sync\n"
+ ".set pop\n"
+ : [originalValue] "=&r" (originalValue),
+ [_q_value] "+m" (_q_value),
+ [newValue] "=&r" (newValue)
+ : [valueToAdd] "r" (valueToAdd * sizeof(T))
+ : "cc", "memory");
+ return originalValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelease(qptrdiff valueToAdd)
+{
+ register T *originalValue;
+ register T *newValue;
+ asm volatile(".set push\n"
+ SET_MIPS2
+ "sync\n"
+ "0:\n"
+ LLP" %[originalValue], %[_q_value]\n"
+ "addu %[newValue], %[originalValue], %[valueToAdd]\n"
+ SCP" %[newValue], %[_q_value]\n"
+ "beqz %[newValue], 0b\n"
+ "nop\n"
+ ".set pop\n"
+ : [originalValue] "=&r" (originalValue),
+ [_q_value] "+m" (_q_value),
+ [newValue] "=&r" (newValue)
+ : [valueToAdd] "r" (valueToAdd * sizeof(T))
+ : "cc", "memory");
+ return originalValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddOrdered(qptrdiff valueToAdd)
+{
+ return fetchAndAddAcquire(valueToAdd);
+}
+
+#else // !Q_CC_GNU
+
+extern "C" {
+ Q_CORE_EXPORT int q_atomic_test_and_set_int(volatile int *ptr, int expected, int newval);
+ Q_CORE_EXPORT int q_atomic_test_and_set_acquire_int(volatile int *ptr, int expected, int newval);
+ Q_CORE_EXPORT int q_atomic_test_and_set_release_int(volatile int *ptr, int expected, int newval);
+ Q_CORE_EXPORT int q_atomic_test_and_set_ptr(volatile void *ptr, void *expected, void *newval);
+ Q_CORE_EXPORT int q_atomic_test_and_set_acquire_ptr(volatile void *ptr, void *expected, void *newval);
+ Q_CORE_EXPORT int q_atomic_test_and_set_release_ptr(volatile void *ptr, void *expected, void *newval);
+} // extern "C"
+
+inline bool QBasicAtomicInt::ref()
+{
+ register int expected;
+ for (;;) {
+ expected = _q_value;
+ if (q_atomic_test_and_set_int(&_q_value, expected, expected + 1))
+ break;
+ }
+ return expected != -1;
+}
+
+inline bool QBasicAtomicInt::deref()
+{
+ register int expected;
+ for (;;) {
+ expected = _q_value;
+ if (q_atomic_test_and_set_int(&_q_value, expected, expected - 1))
+ break;
+ }
+ return expected != 1;
+}
+
+inline bool QBasicAtomicInt::testAndSetRelaxed(int expectedValue, int newValue)
+{
+ return q_atomic_test_and_set_int(&_q_value, expectedValue, newValue) != 0;
+}
+
+inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
+{
+ return q_atomic_test_and_set_acquire_int(&_q_value, expectedValue, newValue) != 0;
+}
+
+inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
+{
+ return q_atomic_test_and_set_release_int(&_q_value, expectedValue, newValue) != 0;
+}
+
+inline bool QBasicAtomicInt::testAndSetOrdered(int expectedValue, int newValue)
+{
+ return q_atomic_test_and_set_acquire_int(&_q_value, expectedValue, newValue) != 0;
+}
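+
+// A test-and-set primitive like the ones above is enough to build
+// higher-level synchronization; for example, a simple spinlock falls out of
+// testAndSetAcquire/testAndSetRelease directly.  Usage sketch (hypothetical
+// class, not part of Qt; assumes #include <QtCore/QAtomicInt>):
+#if 0
+class SpinLock
+{
+public:
+    SpinLock() : m_taken(0) {}
+    void lock()   { while (!m_taken.testAndSetAcquire(0, 1)) { /* spin */ } }
+    void unlock() { m_taken.testAndSetRelease(1, 0); }
+private:
+    QAtomicInt m_taken;
+};
+#endif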
+
+inline int QBasicAtomicInt::fetchAndStoreRelaxed(int newValue)
+{
+ int returnValue;
+ for (;;) {
+ returnValue = _q_value;
+ if (testAndSetRelaxed(returnValue, newValue))
+ break;
+ }
+ return returnValue;
+}
+
+inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
+{
+ int returnValue;
+ for (;;) {
+ returnValue = _q_value;
+ if (testAndSetAcquire(returnValue, newValue))
+ break;
+ }
+ return returnValue;
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelease(int newValue)
+{
+ int returnValue;
+ for (;;) {
+ returnValue = _q_value;
+ if (testAndSetRelease(returnValue, newValue))
+ break;
+ }
+ return returnValue;
+}
+
+inline int QBasicAtomicInt::fetchAndStoreOrdered(int newValue)
+{
+ int returnValue;
+ for (;;) {
+ returnValue = _q_value;
+ if (testAndSetOrdered(returnValue, newValue))
+ break;
+ }
+ return returnValue;
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelaxed(int valueToAdd)
+{
+ int returnValue;
+ for (;;) {
+ returnValue = _q_value;
+ if (testAndSetRelaxed(returnValue, returnValue + valueToAdd))
+ break;
+ }
+ return returnValue;
+}
+
+inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
+{
+ int returnValue;
+ for (;;) {
+ returnValue = _q_value;
+ if (testAndSetAcquire(returnValue, returnValue + valueToAdd))
+ break;
+ }
+ return returnValue;
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
+{
+ int returnValue;
+ for (;;) {
+ returnValue = _q_value;
+ if (testAndSetRelease(returnValue, returnValue + valueToAdd))
+ break;
+ }
+ return returnValue;
+}
+
+inline int QBasicAtomicInt::fetchAndAddOrdered(int valueToAdd)
+{
+ int returnValue;
+ for (;;) {
+ returnValue = _q_value;
+ if (testAndSetOrdered(returnValue, returnValue + valueToAdd))
+ break;
+ }
+ return returnValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelaxed(T *expectedValue, T *newValue)
+{
+ return q_atomic_test_and_set_ptr(&_q_value, expectedValue, newValue) != 0;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetAcquire(T *expectedValue, T *newValue)
+{
+ return q_atomic_test_and_set_acquire_ptr(&_q_value, expectedValue, newValue) != 0;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValue, T *newValue)
+{
+ return q_atomic_test_and_set_release_ptr(&_q_value, expectedValue, newValue) != 0;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetOrdered(T *expectedValue, T *newValue)
+{
+ return q_atomic_test_and_set_acquire_ptr(&_q_value, expectedValue, newValue) != 0;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelaxed(T *newValue)
+{
+ T *returnValue;
+ for (;;) {
+ returnValue = (_q_value);
+ if (testAndSetRelaxed(returnValue, newValue))
+ break;
+ }
+ return returnValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreAcquire(T *newValue)
+{
+ T *returnValue;
+ for (;;) {
+ returnValue = (_q_value);
+ if (testAndSetAcquire(returnValue, newValue))
+ break;
+ }
+ return returnValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelease(T *newValue)
+{
+ T *returnValue;
+ for (;;) {
+ returnValue = (_q_value);
+ if (testAndSetRelease(returnValue, newValue))
+ break;
+ }
+ return returnValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreOrdered(T *newValue)
+{
+ T *returnValue;
+ for (;;) {
+ returnValue = (_q_value);
+ if (testAndSetOrdered(returnValue, newValue))
+ break;
+ }
+ return returnValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelaxed(qptrdiff valueToAdd)
+{
+ T *returnValue;
+ for (;;) {
+ returnValue = (_q_value);
+ if (testAndSetRelaxed(returnValue, returnValue + valueToAdd))
+ break;
+ }
+ return returnValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE
+T *QBasicAtomicPointer<T>::fetchAndAddAcquire(qptrdiff valueToAdd)
+{
+ T *returnValue;
+ for (;;) {
+ returnValue = (_q_value);
+ if (testAndSetAcquire(returnValue, returnValue + valueToAdd))
+ break;
+ }
+ return returnValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelease(qptrdiff valueToAdd)
+{
+ T *returnValue;
+ for (;;) {
+ returnValue = (_q_value);
+ if (testAndSetRelease(returnValue, returnValue + valueToAdd))
+ break;
+ }
+ return returnValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddOrdered(qptrdiff valueToAdd)
+{
+ T *returnValue;
+ for (;;) {
+ returnValue = (_q_value);
+ if (testAndSetOrdered(returnValue, returnValue + valueToAdd))
+ break;
+ }
+ return returnValue;
+}
+
+#endif // Q_CC_GNU
+
+QT_END_NAMESPACE
+
+QT_END_HEADER
+
+#endif // QATOMIC_MIPS_H
diff --git a/src/corelib/arch/qatomic_parisc.h b/src/corelib/arch/qatomic_parisc.h
new file mode 100644
index 0000000000..eeff9266ee
--- /dev/null
+++ b/src/corelib/arch/qatomic_parisc.h
@@ -0,0 +1,305 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QATOMIC_PARISC_H
+#define QATOMIC_PARISC_H
+
+QT_BEGIN_HEADER
+
+QT_BEGIN_NAMESPACE
+
+#define Q_ATOMIC_INT_REFERENCE_COUNTING_IS_NOT_NATIVE
+
+inline bool QBasicAtomicInt::isReferenceCountingNative()
+{ return false; }
+inline bool QBasicAtomicInt::isReferenceCountingWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_TEST_AND_SET_IS_NOT_NATIVE
+
+inline bool QBasicAtomicInt::isTestAndSetNative()
+{ return false; }
+inline bool QBasicAtomicInt::isTestAndSetWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_FETCH_AND_STORE_IS_NOT_NATIVE
+
+inline bool QBasicAtomicInt::isFetchAndStoreNative()
+{ return false; }
+inline bool QBasicAtomicInt::isFetchAndStoreWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_FETCH_AND_ADD_IS_NOT_NATIVE
+
+inline bool QBasicAtomicInt::isFetchAndAddNative()
+{ return false; }
+inline bool QBasicAtomicInt::isFetchAndAddWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_TEST_AND_SET_IS_NOT_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetNative()
+{ return false; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_NOT_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreNative()
+{ return false; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_ADD_IS_NOT_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddNative()
+{ return false; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddWaitFree()
+{ return false; }
+
+extern "C" {
+ Q_CORE_EXPORT void q_atomic_lock(int *lock);
+ Q_CORE_EXPORT void q_atomic_unlock(int *lock);
+}
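+
+// PA-RISC has no compare-and-swap or fetch-and-add; its only atomic
+// primitive is ldcw ("load word and clear"), so every operation in this
+// header is serialized through the small spinlock word (_q_lock) taken with
+// q_atomic_lock() and released with q_atomic_unlock(); that is also why
+// nothing is reported as native above.  Conceptually the lock works like
+// this (hypothetical C rendition; __sync_fetch_and_and stands in for ldcw):
+#if 0
+static inline void q_atomic_lock_sketch(volatile int *lock)
+{
+    // ldcw atomically loads the lock word and clears it; reading a non-zero
+    // value means the lock was free and now belongs to the caller
+    while (__sync_fetch_and_and(lock, 0) == 0)
+    { /* spin until the holder stores the non-zero value back */ }
+}
+
+static inline void q_atomic_unlock_sketch(volatile int *lock)
+{
+    __sync_synchronize();   // order the critical section before the release
+    *lock = 1;              // make the lock word non-zero again
+}
+#endif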
+
+// Reference counting
+
+inline bool QBasicAtomicInt::ref()
+{
+ q_atomic_lock(_q_lock);
+ bool ret = (++_q_value != 0);
+ q_atomic_unlock(_q_lock);
+ return ret;
+}
+
+inline bool QBasicAtomicInt::deref()
+{
+ q_atomic_lock(_q_lock);
+ bool ret = (--_q_value != 0);
+ q_atomic_unlock(_q_lock);
+ return ret;
+}
+
+// Test-and-set for integers
+
+inline bool QBasicAtomicInt::testAndSetOrdered(int expectedValue, int newValue)
+{
+ q_atomic_lock(_q_lock);
+ if (_q_value == expectedValue) {
+ _q_value = newValue;
+ q_atomic_unlock(_q_lock);
+ return true;
+ }
+ q_atomic_unlock(_q_lock);
+ return false;
+}
+
+inline bool QBasicAtomicInt::testAndSetRelaxed(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+// Fetch-and-store for integers
+
+inline int QBasicAtomicInt::fetchAndStoreOrdered(int newValue)
+{
+ q_atomic_lock(_q_lock);
+ int returnValue = _q_value;
+ _q_value = newValue;
+ q_atomic_unlock(_q_lock);
+ return returnValue;
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelaxed(int newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelease(int newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+// Fetch-and-add for integers
+
+inline int QBasicAtomicInt::fetchAndAddOrdered(int valueToAdd)
+{
+ q_atomic_lock(_q_lock);
+ int originalValue = _q_value;
+ _q_value += valueToAdd;
+ q_atomic_unlock(_q_lock);
+ return originalValue;
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelaxed(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+// Test and set for pointers
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetOrdered(T *expectedValue, T *newValue)
+{
+ q_atomic_lock(_q_lock);
+ if (_q_value == expectedValue) {
+ _q_value = newValue;
+ q_atomic_unlock(_q_lock);
+ return true;
+ }
+ q_atomic_unlock(_q_lock);
+ return false;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelaxed(T *expectedValue, T *newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetAcquire(T *expectedValue, T *newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValue, T *newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+// Fetch and store for pointers
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreOrdered(T *newValue)
+{
+ q_atomic_lock(_q_lock);
+ T *returnValue = (_q_value);
+ _q_value = newValue;
+ q_atomic_unlock(_q_lock);
+ return returnValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelaxed(T *newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreAcquire(T *newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelease(T *newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+// Fetch and add for pointers
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddOrdered(qptrdiff valueToAdd)
+{
+ q_atomic_lock(_q_lock);
+ T *returnValue = (_q_value);
+ _q_value += valueToAdd;
+ q_atomic_unlock(_q_lock);
+ return returnValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelaxed(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddAcquire(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelease(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+QT_END_NAMESPACE
+
+QT_END_HEADER
+
+#endif // QATOMIC_PARISC_H
diff --git a/src/corelib/arch/qatomic_powerpc.h b/src/corelib/arch/qatomic_powerpc.h
new file mode 100644
index 0000000000..e261ff7192
--- /dev/null
+++ b/src/corelib/arch/qatomic_powerpc.h
@@ -0,0 +1,648 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QATOMIC_POWERPC_H
+#define QATOMIC_POWERPC_H
+
+QT_BEGIN_HEADER
+
+QT_BEGIN_NAMESPACE
+
+#define Q_ATOMIC_INT_REFERENCE_COUNTING_IS_ALWAYS_NATIVE
+
+inline bool QBasicAtomicInt::isReferenceCountingNative()
+{ return true; }
+inline bool QBasicAtomicInt::isReferenceCountingWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_TEST_AND_SET_IS_ALWAYS_NATIVE
+
+inline bool QBasicAtomicInt::isTestAndSetNative()
+{ return true; }
+inline bool QBasicAtomicInt::isTestAndSetWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_FETCH_AND_STORE_IS_ALWAYS_NATIVE
+
+inline bool QBasicAtomicInt::isFetchAndStoreNative()
+{ return true; }
+inline bool QBasicAtomicInt::isFetchAndStoreWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_FETCH_AND_ADD_IS_ALWAYS_NATIVE
+
+inline bool QBasicAtomicInt::isFetchAndAddNative()
+{ return true; }
+inline bool QBasicAtomicInt::isFetchAndAddWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_TEST_AND_SET_IS_ALWAYS_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_ALWAYS_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_ADD_IS_ALWAYS_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddWaitFree()
+{ return false; }
+
+#if defined(Q_CC_GNU)
+
+#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 2) \
+ || (!defined(__64BIT__) && !defined(__powerpc64__) && !defined(__ppc64__))
+# define _Q_VALUE "0, %[_q_value]"
+# define _Q_VALUE_MEMORY_OPERAND "+m" (_q_value)
+# define _Q_VALUE_REGISTER_OPERAND [_q_value] "r" (&_q_value),
+#else
+// On 64-bit with gcc >= 4.2
+# define _Q_VALUE "%y[_q_value]"
+# define _Q_VALUE_MEMORY_OPERAND [_q_value] "+Z" (_q_value)
+# define _Q_VALUE_REGISTER_OPERAND
+#endif
+
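+// The operations below use the PowerPC load-reserved/store-conditional
+// pattern: lwarx reserves the word, stwcx. stores only if the reservation
+// is still intact, and bne- retries the loop otherwise. An isync after a
+// successful store gives acquire semantics; an eieio before the loop gives
+// release semantics.
+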
+inline bool QBasicAtomicInt::ref()
+{
+ register int originalValue;
+ register int newValue;
+ asm volatile("lwarx %[originalValue]," _Q_VALUE "\n"
+ "addi %[newValue], %[originalValue], %[one]\n"
+ "stwcx. %[newValue]," _Q_VALUE "\n"
+ "bne- $-12\n"
+ : [originalValue] "=&b" (originalValue),
+ [newValue] "=&r" (newValue),
+ _Q_VALUE_MEMORY_OPERAND
+ : _Q_VALUE_REGISTER_OPERAND
+ [one] "i" (1)
+ : "cc", "memory");
+ return newValue != 0;
+}
+
+inline bool QBasicAtomicInt::deref()
+{
+ register int originalValue;
+ register int newValue;
+ asm volatile("lwarx %[originalValue]," _Q_VALUE "\n"
+ "addi %[newValue], %[originalValue], %[minusOne]\n"
+ "stwcx. %[newValue]," _Q_VALUE "\n"
+ "bne- $-12\n"
+ : [originalValue] "=&b" (originalValue),
+ [newValue] "=&r" (newValue),
+ _Q_VALUE_MEMORY_OPERAND
+ : _Q_VALUE_REGISTER_OPERAND
+ [minusOne] "i" (-1)
+ : "cc", "memory");
+ return newValue != 0;
+}
+
+inline bool QBasicAtomicInt::testAndSetRelaxed(int expectedValue, int newValue)
+{
+ register int result;
+ asm volatile("lwarx %[result]," _Q_VALUE "\n"
+ "xor. %[result], %[result], %[expectedValue]\n"
+ "bne $+12\n"
+ "stwcx. %[newValue]," _Q_VALUE "\n"
+ "bne- $-16\n"
+ : [result] "=&r" (result),
+ _Q_VALUE_MEMORY_OPERAND
+ : _Q_VALUE_REGISTER_OPERAND
+ [expectedValue] "r" (expectedValue),
+ [newValue] "r" (newValue)
+ : "cc", "memory");
+ return result == 0;
+}
+
+inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
+{
+ register int result;
+ asm volatile("lwarx %[result]," _Q_VALUE "\n"
+ "xor. %[result], %[result], %[expectedValue]\n"
+ "bne $+16\n"
+ "stwcx. %[newValue]," _Q_VALUE "\n"
+ "bne- $-16\n"
+ "isync\n"
+ : [result] "=&r" (result),
+ _Q_VALUE_MEMORY_OPERAND
+ : _Q_VALUE_REGISTER_OPERAND
+ [expectedValue] "r" (expectedValue),
+ [newValue] "r" (newValue)
+ : "cc", "memory");
+ return result == 0;
+}
+
+inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
+{
+ register int result;
+ asm volatile("eieio\n"
+ "lwarx %[result]," _Q_VALUE "\n"
+ "xor. %[result], %[result], %[expectedValue]\n"
+ "bne $+12\n"
+ "stwcx. %[newValue]," _Q_VALUE "\n"
+ "bne- $-16\n"
+ : [result] "=&r" (result),
+ _Q_VALUE_MEMORY_OPERAND
+ : _Q_VALUE_REGISTER_OPERAND
+ [expectedValue] "r" (expectedValue),
+ [newValue] "r" (newValue)
+ : "cc", "memory");
+ return result == 0;
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelaxed(int newValue)
+{
+ register int originalValue;
+ asm volatile("lwarx %[originalValue]," _Q_VALUE "\n"
+ "stwcx. %[newValue]," _Q_VALUE "\n"
+ "bne- $-8\n"
+ : [originalValue] "=&r" (originalValue),
+ _Q_VALUE_MEMORY_OPERAND
+ : _Q_VALUE_REGISTER_OPERAND
+ [newValue] "r" (newValue)
+ : "cc", "memory");
+ return originalValue;
+}
+
+inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
+{
+ register int originalValue;
+ asm volatile("lwarx %[originalValue]," _Q_VALUE "\n"
+ "stwcx. %[newValue]," _Q_VALUE "\n"
+ "bne- $-8\n"
+ "isync\n"
+ : [originalValue] "=&r" (originalValue),
+ _Q_VALUE_MEMORY_OPERAND
+ : _Q_VALUE_REGISTER_OPERAND
+ [newValue] "r" (newValue)
+ : "cc", "memory");
+ return originalValue;
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelease(int newValue)
+{
+ register int originalValue;
+ asm volatile("eieio\n"
+ "lwarx %[originalValue]," _Q_VALUE "\n"
+ "stwcx. %[newValue]," _Q_VALUE "\n"
+ "bne- $-8\n"
+ : [originalValue] "=&r" (originalValue),
+ _Q_VALUE_MEMORY_OPERAND
+ : _Q_VALUE_REGISTER_OPERAND
+ [newValue] "r" (newValue)
+ : "cc", "memory");
+ return originalValue;
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelaxed(int valueToAdd)
+{
+ register int originalValue;
+ register int newValue;
+ asm volatile("lwarx %[originalValue]," _Q_VALUE "\n"
+ "add %[newValue], %[originalValue], %[valueToAdd]\n"
+ "stwcx. %[newValue]," _Q_VALUE "\n"
+ "bne- $-12\n"
+ : [originalValue] "=&r" (originalValue),
+ [newValue] "=&r" (newValue),
+ _Q_VALUE_MEMORY_OPERAND
+ : _Q_VALUE_REGISTER_OPERAND
+ [valueToAdd] "r" (valueToAdd)
+ : "cc", "memory");
+ return originalValue;
+}
+
+inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
+{
+ register int originalValue;
+ register int newValue;
+ asm volatile("lwarx %[originalValue]," _Q_VALUE "\n"
+ "add %[newValue], %[originalValue], %[valueToAdd]\n"
+ "stwcx. %[newValue]," _Q_VALUE "\n"
+ "bne- $-12\n"
+ "isync\n"
+ : [originalValue] "=&r" (originalValue),
+ [newValue] "=&r" (newValue),
+ _Q_VALUE_MEMORY_OPERAND
+ : _Q_VALUE_REGISTER_OPERAND
+ [valueToAdd] "r" (valueToAdd)
+ : "cc", "memory");
+ return originalValue;
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
+{
+ register int originalValue;
+ register int newValue;
+ asm volatile("eieio\n"
+ "lwarx %[originalValue]," _Q_VALUE "\n"
+ "add %[newValue], %[originalValue], %[valueToAdd]\n"
+ "stwcx. %[newValue]," _Q_VALUE "\n"
+ "bne- $-12\n"
+ : [originalValue] "=&r" (originalValue),
+ [newValue] "=&r" (newValue),
+ _Q_VALUE_MEMORY_OPERAND
+ : _Q_VALUE_REGISTER_OPERAND
+ [valueToAdd] "r" (valueToAdd)
+ : "cc", "memory");
+ return originalValue;
+}
+
+#if defined(__64BIT__) || defined(__powerpc64__) || defined(__ppc64__)
+# define LPARX "ldarx"
+# define STPCX "stdcx."
+#else
+# define LPARX "lwarx"
+# define STPCX "stwcx."
+#endif
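+
+// Pointer-sized operations need the doubleword reserved/conditional forms
+// ldarx/stdcx. on 64-bit PowerPC; 32-bit builds keep using lwarx/stwcx.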
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelaxed(T *expectedValue, T *newValue)
+{
+ register void *result;
+ asm volatile(LPARX" %[result]," _Q_VALUE "\n"
+ "xor. %[result], %[result], %[expectedValue]\n"
+ "bne $+12\n"
+ STPCX" %[newValue]," _Q_VALUE "\n"
+ "bne- $-16\n"
+ : [result] "=&r" (result),
+ _Q_VALUE_MEMORY_OPERAND
+ : _Q_VALUE_REGISTER_OPERAND
+ [expectedValue] "r" (expectedValue),
+ [newValue] "r" (newValue)
+ : "cc", "memory");
+ return result == 0;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetAcquire(T *expectedValue, T *newValue)
+{
+ register void *result;
+ asm volatile(LPARX" %[result]," _Q_VALUE "\n"
+ "xor. %[result], %[result], %[expectedValue]\n"
+ "bne $+16\n"
+ STPCX" %[newValue]," _Q_VALUE "\n"
+ "bne- $-16\n"
+ "isync\n"
+ : [result] "=&r" (result),
+ _Q_VALUE_MEMORY_OPERAND
+ : _Q_VALUE_REGISTER_OPERAND
+ [expectedValue] "r" (expectedValue),
+ [newValue] "r" (newValue)
+ : "cc", "memory");
+ return result == 0;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValue, T *newValue)
+{
+ register void *result;
+ asm volatile("eieio\n"
+ LPARX" %[result]," _Q_VALUE "\n"
+ "xor. %[result], %[result], %[expectedValue]\n"
+ "bne $+12\n"
+ STPCX" %[newValue]," _Q_VALUE "\n"
+ "bne- $-16\n"
+ : [result] "=&r" (result),
+ _Q_VALUE_MEMORY_OPERAND
+ : _Q_VALUE_REGISTER_OPERAND
+ [expectedValue] "r" (expectedValue),
+ [newValue] "r" (newValue)
+ : "cc", "memory");
+ return result == 0;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelaxed(T *newValue)
+{
+ register T *originalValue;
+ asm volatile(LPARX" %[originalValue]," _Q_VALUE "\n"
+ STPCX" %[newValue]," _Q_VALUE "\n"
+ "bne- $-8\n"
+ : [originalValue] "=&r" (originalValue),
+ _Q_VALUE_MEMORY_OPERAND
+ : _Q_VALUE_REGISTER_OPERAND
+ [newValue] "r" (newValue)
+ : "cc", "memory");
+ return originalValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreAcquire(T *newValue)
+{
+ register T *originalValue;
+ asm volatile(LPARX" %[originalValue]," _Q_VALUE "\n"
+ STPCX" %[newValue]," _Q_VALUE "\n"
+ "bne- $-8\n"
+ "isync\n"
+ : [originalValue] "=&r" (originalValue),
+ _Q_VALUE_MEMORY_OPERAND
+ : _Q_VALUE_REGISTER_OPERAND
+ [newValue] "r" (newValue)
+ : "cc", "memory");
+ return originalValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelease(T *newValue)
+{
+ register T *originalValue;
+ asm volatile("eieio\n"
+ LPARX" %[originalValue]," _Q_VALUE "\n"
+ STPCX" %[newValue]," _Q_VALUE "\n"
+ "bne- $-8\n"
+ : [originalValue] "=&r" (originalValue),
+ _Q_VALUE_MEMORY_OPERAND
+ : _Q_VALUE_REGISTER_OPERAND
+ [newValue] "r" (newValue)
+ : "cc", "memory");
+ return originalValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelaxed(qptrdiff valueToAdd)
+{
+ register T *originalValue;
+ register T *newValue;
+ asm volatile(LPARX" %[originalValue]," _Q_VALUE "\n"
+ "add %[newValue], %[originalValue], %[valueToAdd]\n"
+ STPCX" %[newValue]," _Q_VALUE "\n"
+ "bne- $-12\n"
+ : [originalValue] "=&r" (originalValue),
+ [newValue] "=&r" (newValue),
+ _Q_VALUE_MEMORY_OPERAND
+ : _Q_VALUE_REGISTER_OPERAND
+ [valueToAdd] "r" (valueToAdd * sizeof(T))
+ : "cc", "memory");
+ return originalValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddAcquire(qptrdiff valueToAdd)
+{
+ register T *originalValue;
+ register T *newValue;
+ asm volatile(LPARX" %[originalValue]," _Q_VALUE "\n"
+ "add %[newValue], %[originalValue], %[valueToAdd]\n"
+ STPCX" %[newValue]," _Q_VALUE "\n"
+ "bne- $-12\n"
+ "isync\n"
+ : [originalValue] "=&r" (originalValue),
+ [newValue] "=&r" (newValue),
+ _Q_VALUE_MEMORY_OPERAND
+ : _Q_VALUE_REGISTER_OPERAND
+ [valueToAdd] "r" (valueToAdd * sizeof(T))
+ : "cc", "memory");
+ return originalValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelease(qptrdiff valueToAdd)
+{
+ register T *originalValue;
+ register T *newValue;
+ asm volatile("eieio\n"
+ LPARX" %[originalValue]," _Q_VALUE "\n"
+ "add %[newValue], %[originalValue], %[valueToAdd]\n"
+ STPCX" %[newValue]," _Q_VALUE "\n"
+ "bne- $-12\n"
+ : [originalValue] "=&r" (originalValue),
+ [newValue] "=&r" (newValue),
+ _Q_VALUE_MEMORY_OPERAND
+ : _Q_VALUE_REGISTER_OPERAND
+ [valueToAdd] "r" (valueToAdd * sizeof(T))
+ : "cc", "memory");
+ return originalValue;
+}
+
+#undef LPARX
+#undef STPCX
+#undef _Q_VALUE
+#undef _Q_VALUE_MEMORY_OPERAND
+#undef _Q_VALUE_REGISTER_OPERAND
+
+#else
+
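+// Fallback for compilers without GNU-style inline assembly: the primitives
+// are implemented out of line and only declared here.
+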
+extern "C" {
+ int q_atomic_test_and_set_int(volatile int *ptr, int expectedValue, int newValue);
+ int q_atomic_test_and_set_acquire_int(volatile int *ptr, int expectedValue, int newValue);
+ int q_atomic_test_and_set_release_int(volatile int *ptr, int expectedValue, int newValue);
+ int q_atomic_test_and_set_ptr(volatile void *ptr, void *expectedValue, void *newValue);
+ int q_atomic_test_and_set_acquire_ptr(volatile void *ptr, void *expectedValue, void *newValue);
+ int q_atomic_test_and_set_release_ptr(volatile void *ptr, void *expectedValue, void *newValue);
+ int q_atomic_increment(volatile int *);
+ int q_atomic_decrement(volatile int *);
+ int q_atomic_set_int(volatile int *, int);
+ int q_atomic_fetch_and_store_acquire_int(volatile int *ptr, int newValue);
+ int q_atomic_fetch_and_store_release_int(volatile int *ptr, int newValue);
+ void *q_atomic_set_ptr(volatile void *, void *);
+ int q_atomic_fetch_and_store_acquire_ptr(volatile void *ptr, void *newValue);
+ int q_atomic_fetch_and_store_release_ptr(volatile void *ptr, void *newValue);
+ int q_atomic_fetch_and_add_int(volatile int *ptr, int valueToAdd);
+ int q_atomic_fetch_and_add_acquire_int(volatile int *ptr, int valueToAdd);
+ int q_atomic_fetch_and_add_release_int(volatile int *ptr, int valueToAdd);
+ void *q_atomic_fetch_and_add_ptr(volatile void *ptr, qptrdiff valueToAdd);
+ void *q_atomic_fetch_and_add_acquire_ptr(volatile void *ptr, qptrdiff valueToAdd);
+ void *q_atomic_fetch_and_add_release_ptr(volatile void *ptr, qptrdiff valueToAdd);
+} // extern "C"
+
+
+inline bool QBasicAtomicInt::ref()
+{
+ return q_atomic_increment(&_q_value) != 0;
+}
+
+inline bool QBasicAtomicInt::deref()
+{
+ return q_atomic_decrement(&_q_value) != 0;
+}
+
+inline bool QBasicAtomicInt::testAndSetRelaxed(int expectedValue, int newValue)
+{
+ return q_atomic_test_and_set_int(&_q_value, expectedValue, newValue) != 0;
+}
+
+inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
+{
+ return q_atomic_test_and_set_acquire_int(&_q_value, expectedValue, newValue) != 0;
+}
+
+inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
+{
+ return q_atomic_test_and_set_release_int(&_q_value, expectedValue, newValue) != 0;
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelaxed(int newValue)
+{
+ return q_atomic_set_int(&_q_value, newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
+{
+ return q_atomic_fetch_and_store_acquire_int(&_q_value, newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelease(int newValue)
+{
+ return q_atomic_fetch_and_store_release_int(&_q_value, newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelaxed(int valueToAdd)
+{
+ return q_atomic_fetch_and_add_int(&_q_value, valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
+{
+ return q_atomic_fetch_and_add_acquire_int(&_q_value, valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
+{
+ return q_atomic_fetch_and_add_release_int(&_q_value, valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelaxed(T *expectedValue, T *newValue)
+{
+ return q_atomic_test_and_set_ptr(&_q_value, expectedValue, newValue) != 0;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetAcquire(T *expectedValue, T *newValue)
+{
+ return q_atomic_test_and_set_acquire_ptr(&_q_value, expectedValue, newValue) != 0;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValue, T *newValue)
+{
+ return q_atomic_test_and_set_release_ptr(&_q_value, expectedValue, newValue) != 0;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelaxed(T *newValue)
+{
+ return reinterpret_cast<T *>(q_atomic_set_ptr(&_q_value, newValue));
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreAcquire(T *newValue)
+{
+ return reinterpret_cast<T *>(q_atomic_fetch_and_store_acquire_ptr(&_q_value, newValue));
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelease(T *newValue)
+{
+ return reinterpret_cast<T *>(q_atomic_fetch_and_store_release_ptr(&_q_value, newValue));
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelaxed(qptrdiff valueToAdd)
+{
+ return reinterpret_cast<T *>(q_atomic_fetch_and_add_ptr(&_q_value, valueToAdd * sizeof(T)));
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddAcquire(qptrdiff valueToAdd)
+{
+ return reinterpret_cast<T *>(q_atomic_fetch_and_add_acquire_ptr(&_q_value, valueToAdd * sizeof(T)));
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelease(qptrdiff valueToAdd)
+{
+ return reinterpret_cast<T *>(q_atomic_fetch_and_add_release_ptr(&_q_value, valueToAdd * sizeof(T)));
+}
+
+#endif
+
+inline bool QBasicAtomicInt::testAndSetOrdered(int expectedValue, int newValue)
+{
+ return testAndSetAcquire(expectedValue, newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreOrdered(int newValue)
+{
+ return fetchAndStoreAcquire(newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndAddOrdered(int valueToAdd)
+{
+ return fetchAndAddAcquire(valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetOrdered(T *expectedValue, T *newValue)
+{
+ return testAndSetAcquire(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreOrdered(T *newValue)
+{
+ return fetchAndStoreAcquire(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddOrdered(qptrdiff valueToAdd)
+{
+ return fetchAndAddAcquire(valueToAdd);
+}
+
+QT_END_NAMESPACE
+
+QT_END_HEADER
+
+#endif // QATOMIC_POWERPC_H
diff --git a/src/corelib/arch/qatomic_s390.h b/src/corelib/arch/qatomic_s390.h
new file mode 100644
index 0000000000..552ebe43fe
--- /dev/null
+++ b/src/corelib/arch/qatomic_s390.h
@@ -0,0 +1,426 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QATOMIC_S390_H
+#define QATOMIC_S390_H
+
+QT_BEGIN_HEADER
+
+QT_BEGIN_NAMESPACE
+
+#define Q_ATOMIC_INT_REFERENCE_COUNTING_IS_ALWAYS_NATIVE
+
+inline bool QBasicAtomicInt::isReferenceCountingNative()
+{ return true; }
+inline bool QBasicAtomicInt::isReferenceCountingWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_TEST_AND_SET_IS_ALWAYS_NATIVE
+
+inline bool QBasicAtomicInt::isTestAndSetNative()
+{ return true; }
+inline bool QBasicAtomicInt::isTestAndSetWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_FETCH_AND_STORE_IS_ALWAYS_NATIVE
+
+inline bool QBasicAtomicInt::isFetchAndStoreNative()
+{ return true; }
+inline bool QBasicAtomicInt::isFetchAndStoreWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_FETCH_AND_ADD_IS_ALWAYS_NATIVE
+
+inline bool QBasicAtomicInt::isFetchAndAddNative()
+{ return true; }
+inline bool QBasicAtomicInt::isFetchAndAddWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_TEST_AND_SET_IS_ALWAYS_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_ALWAYS_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_ADD_IS_ALWAYS_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddWaitFree()
+{ return false; }
+
+#ifdef __GNUC__
+#define __GNU_EXTENSION __extension__
+#else
+#define __GNU_EXTENSION
+#endif
+
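+// The macros below implement the usual s390 compare-and-swap loop: load the
+// current value, apply the operation into a second register, and let cs (or
+// csg on 64-bit) store the result, retrying via "jl 0b" when another CPU has
+// modified the word in the meantime. __CS_LOOP evaluates to the new value,
+// __CS_OLD_LOOP and __CSG_OLD_LOOP to the old one. The "bcr 15,0" used for
+// acquire/release ordering below is the s390 serialization (memory barrier)
+// instruction.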
+#define __CS_LOOP(ptr, op_val, op_string, pre, post) __GNU_EXTENSION ({ \
+ volatile int old_val, new_val; \
+ __asm__ __volatile__(pre \
+ " l %0,0(%3)\n" \
+ "0: lr %1,%0\n" \
+ op_string " %1,%4\n" \
+ " cs %0,%1,0(%3)\n" \
+ " jl 0b\n" \
+ post \
+ : "=&d" (old_val), "=&d" (new_val), \
+ "=m" (*ptr) \
+ : "a" (ptr), "d" (op_val), \
+ "m" (*ptr) \
+ : "cc", "memory" ); \
+ new_val; \
+})
+
+#define __CS_OLD_LOOP(ptr, op_val, op_string, pre, post ) __GNU_EXTENSION ({ \
+ volatile int old_val, new_val; \
+ __asm__ __volatile__(pre \
+ " l %0,0(%3)\n" \
+ "0: lr %1,%0\n" \
+ op_string " %1,%4\n" \
+ " cs %0,%1,0(%3)\n" \
+ " jl 0b\n" \
+ post \
+ : "=&d" (old_val), "=&d" (new_val), \
+ "=m" (*ptr) \
+ : "a" (ptr), "d" (op_val), \
+ "m" (*ptr) \
+ : "cc", "memory" ); \
+ old_val; \
+})
+
+#ifdef __s390x__
+#define __CSG_OLD_LOOP(ptr, op_val, op_string, pre, post) __GNU_EXTENSION ({ \
+ long old_val, new_val; \
+ __asm__ __volatile__(pre \
+ " lg %0,0(%3)\n" \
+ "0: lgr %1,%0\n" \
+ op_string " %1,%4\n" \
+ " csg %0,%1,0(%3)\n" \
+ " jl 0b\n" \
+ post \
+ : "=&d" (old_val), "=&d" (new_val), \
+ "=m" (*ptr) \
+ : "a" (ptr), "d" (op_val), \
+ "m" (*ptr) \
+ : "cc", "memory" ); \
+ old_val; \
+})
+#endif
+
+inline bool QBasicAtomicInt::ref()
+{
+ return __CS_LOOP(&_q_value, 1, "ar", "", "") != 0;
+}
+
+inline bool QBasicAtomicInt::deref()
+{
+ return __CS_LOOP(&_q_value, 1, "sr", "", "") != 0;
+}
+
+inline bool QBasicAtomicInt::testAndSetRelaxed(int expectedValue, int newValue)
+{
+ int retval;
+ __asm__ __volatile__(
+ " lr %0,%3\n"
+ " cs %0,%4,0(%2)\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ "0:"
+ : "=&d" (retval), "=m" (_q_value)
+ : "a" (&_q_value), "d" (expectedValue) , "d" (newValue),
+ "m" (_q_value) : "cc", "memory" );
+ return retval == 0;
+}
+
+inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
+{
+ int retval;
+ __asm__ __volatile__(
+ " lr %0,%3\n"
+ " cs %0,%4,0(%2)\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ "0:\n"
+ " bcr 15,0\n"
+ : "=&d" (retval), "=m" (_q_value)
+ : "a" (&_q_value), "d" (expectedValue) , "d" (newValue),
+ "m" (_q_value) : "cc", "memory" );
+ return retval == 0;
+}
+
+inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
+{
+ int retval;
+ __asm__ __volatile__(
+ " bcr 15,0\n"
+ " lr %0,%3\n"
+ " cs %0,%4,0(%2)\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ "0:"
+ : "=&d" (retval), "=m" (_q_value)
+ : "a" (&_q_value), "d" (expectedValue) , "d" (newValue),
+ "m" (_q_value) : "cc", "memory" );
+ return retval == 0;
+}
+
+inline bool QBasicAtomicInt::testAndSetOrdered(int expectedValue, int newValue)
+{
+ return testAndSetAcquire(expectedValue, newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelaxed(int newValue)
+{
+ return __CS_OLD_LOOP(&_q_value, newValue, "lr", "", "");
+}
+
+inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
+{
+ return __CS_OLD_LOOP(&_q_value, newValue, "lr", "", "bcr 15,0\n");
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelease(int newValue)
+{
+ return __CS_OLD_LOOP(&_q_value, newValue, "lr", "bcr 15,0\n", "");
+}
+
+inline int QBasicAtomicInt::fetchAndStoreOrdered(int newValue)
+{
+ return fetchAndStoreAcquire(newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelaxed(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddOrdered(int valueToAdd)
+{
+ return __CS_OLD_LOOP(&_q_value, valueToAdd, "ar", "", "bcr 15,0\n");
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelaxed(T *expectedValue, T *newValue)
+{
+ int retval;
+
+#ifndef __s390x__
+ __asm__ __volatile__(
+ " lr %0,%3\n"
+ " cs %0,%4,0(%2)\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ "0:"
+ : "=&d" (retval), "=m" (_q_value)
+ : "a" (&_q_value), "d" (expectedValue) , "d" (newValue),
+ "m" (_q_value) : "cc", "memory" );
+#else
+ __asm__ __volatile__(
+ " lgr %0,%3\n"
+ " csg %0,%4,0(%2)\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ "0:"
+ : "=&d" (retval), "=m" (_q_value)
+ : "a" (&_q_value), "d" (expectedValue) , "d" (newValue),
+ "m" (_q_value) : "cc", "memory" );
+#endif
+
+ return retval == 0;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetAcquire(T *expectedValue, T *newValue)
+{
+ int retval;
+
+#ifndef __s390x__
+ __asm__ __volatile__(
+ " lr %0,%3\n"
+ " cs %0,%4,0(%2)\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ "0:\n"
+ " bcr 15,0\n"
+ : "=&d" (retval), "=m" (_q_value)
+ : "a" (&_q_value), "d" (expectedValue) , "d" (newValue),
+ "m" (_q_value) : "cc", "memory" );
+#else
+ __asm__ __volatile__(
+ " lgr %0,%3\n"
+ " csg %0,%4,0(%2)\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ "0:\n"
+ " bcr 15,0\n"
+ : "=&d" (retval), "=m" (_q_value)
+ : "a" (&_q_value), "d" (expectedValue) , "d" (newValue),
+ "m" (_q_value) : "cc", "memory" );
+#endif
+
+ return retval == 0;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValue, T *newValue)
+{
+ int retval;
+
+#ifndef __s390x__
+ __asm__ __volatile__(
+ " bcr 15,0\n"
+ " lr %0,%3\n"
+ " cs %0,%4,0(%2)\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ "0:"
+ : "=&d" (retval), "=m" (_q_value)
+ : "a" (&_q_value), "d" (expectedValue) , "d" (newValue),
+ "m" (_q_value) : "cc", "memory" );
+#else
+ __asm__ __volatile__(
+ " bcr 15,0\n"
+ " lgr %0,%3\n"
+ " csg %0,%4,0(%2)\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ "0:"
+ : "=&d" (retval), "=m" (_q_value)
+ : "a" (&_q_value), "d" (expectedValue) , "d" (newValue),
+ "m" (_q_value) : "cc", "memory" );
+#endif
+
+ return retval == 0;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetOrdered(T *expectedValue, T *newValue)
+{
+ return testAndSetAcquire(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T* QBasicAtomicPointer<T>::fetchAndStoreRelaxed(T *newValue)
+{
+#ifndef __s390x__
+ return (T*)__CS_OLD_LOOP(&_q_value, (int)newValue, "lr", "", "");
+#else
+ return (T*)__CSG_OLD_LOOP(&_q_value, (long)newValue, "lgr", "", "");
+#endif
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T* QBasicAtomicPointer<T>::fetchAndStoreAcquire(T *newValue)
+{
+#ifndef __s390x__
+ return (T*)__CS_OLD_LOOP(&_q_value, (int)newValue, "lr", "", "bcr 15,0 \n");
+#else
+ return (T*)__CSG_OLD_LOOP(&_q_value, (long)newValue, "lgr", "", "bcr 15,0 \n");
+#endif
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T* QBasicAtomicPointer<T>::fetchAndStoreRelease(T *newValue)
+{
+#ifndef __s390x__
+ return (T*)__CS_OLD_LOOP(&_q_value, (int)newValue, "lr", "bcr 15,0 \n", "");
+#else
+ return (T*)__CSG_OLD_LOOP(&_q_value, (long)newValue, "lgr", "bcr 15,0\n", "");
+#endif
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T* QBasicAtomicPointer<T>::fetchAndStoreOrdered(T *newValue)
+{
+ return fetchAndStoreAcquire(newValue);
+}
+
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelaxed(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddAcquire(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelease(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+#undef __GNU_EXTENSION
+
+QT_END_NAMESPACE
+
+QT_END_HEADER
+
+#endif // QATOMIC_S390_H
diff --git a/src/corelib/arch/qatomic_sh.h b/src/corelib/arch/qatomic_sh.h
new file mode 100644
index 0000000000..0150ca4443
--- /dev/null
+++ b/src/corelib/arch/qatomic_sh.h
@@ -0,0 +1,330 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QATOMIC_SH_H
+#define QATOMIC_SH_H
+
+QT_BEGIN_HEADER
+
+QT_BEGIN_NAMESPACE
+
+#define Q_ATOMIC_INT_REFERENCE_COUNTING_IS_NOT_NATIVE
+
+inline bool QBasicAtomicInt::isReferenceCountingNative()
+{ return false; }
+inline bool QBasicAtomicInt::isReferenceCountingWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_TEST_AND_SET_IS_NOT_NATIVE
+
+inline bool QBasicAtomicInt::isTestAndSetNative()
+{ return false; }
+inline bool QBasicAtomicInt::isTestAndSetWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_FETCH_AND_STORE_IS_NOT_NATIVE
+
+inline bool QBasicAtomicInt::isFetchAndStoreNative()
+{ return false; }
+inline bool QBasicAtomicInt::isFetchAndStoreWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_FETCH_AND_ADD_IS_NOT_NATIVE
+
+inline bool QBasicAtomicInt::isFetchAndAddNative()
+{ return false; }
+inline bool QBasicAtomicInt::isFetchAndAddWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_TEST_AND_SET_IS_NOT_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetNative()
+{ return false; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_NOT_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreNative()
+{ return false; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_ADD_IS_NOT_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddNative()
+{ return false; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddWaitFree()
+{ return false; }
+
+extern Q_CORE_EXPORT volatile char qt_atomic_lock;
+Q_CORE_EXPORT void qt_atomic_yield(int *count);
+
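+// Plain SH has no load-linked/store-conditional pair, so a single global
+// byte lock is acquired with tas.b (atomic test-and-set; movt copies the T
+// flag into the result) and qt_atomic_yield() backs off while the lock is
+// contended.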
+inline int qt_atomic_tasb(volatile char *ptr)
+{
+ register int ret;
+ asm volatile("tas.b @%2\n"
+ "movt %0"
+ : "=&r"(ret), "=m"(*ptr)
+ : "r"(ptr)
+ : "cc", "memory");
+ return ret;
+}
+
+// Reference counting
+
+inline bool QBasicAtomicInt::ref()
+{
+ int count = 0;
+ while (qt_atomic_tasb(&qt_atomic_lock) == 0)
+ qt_atomic_yield(&count);
+ int originalValue = _q_value++;
+ qt_atomic_lock = 0;
+ return originalValue != -1;
+}
+
+inline bool QBasicAtomicInt::deref()
+{
+ int count = 0;
+ while (qt_atomic_tasb(&qt_atomic_lock) == 0)
+ qt_atomic_yield(&count);
+ int originalValue = _q_value--;
+ qt_atomic_lock = 0;
+ return originalValue != 1;
+}
+
+// Test and set for integers
+
+inline bool QBasicAtomicInt::testAndSetOrdered(int expectedValue, int newValue)
+{
+ bool returnValue = false;
+ int count = 0;
+ while (qt_atomic_tasb(&qt_atomic_lock) == 0)
+ qt_atomic_yield(&count);
+ if (_q_value == expectedValue) {
+ _q_value = newValue;
+ returnValue = true;
+ }
+ qt_atomic_lock = 0;
+ return returnValue;
+}
+
+inline bool QBasicAtomicInt::testAndSetRelaxed(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+// Fetch and store for integers
+
+inline int QBasicAtomicInt::fetchAndStoreOrdered(int newValue)
+{
+ int count = 0;
+ while (qt_atomic_tasb(&qt_atomic_lock) == 0)
+ qt_atomic_yield(&count);
+ int originalValue = _q_value;
+ _q_value = newValue;
+ qt_atomic_lock = 0;
+ return originalValue;
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelaxed(int newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelease(int newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+// Fetch and add for integers
+
+inline int QBasicAtomicInt::fetchAndAddOrdered(int valueToAdd)
+{
+ int count = 0;
+ while (qt_atomic_tasb(&qt_atomic_lock) == 0)
+ qt_atomic_yield(&count);
+ int originalValue = _q_value;
+ _q_value += valueToAdd;
+ qt_atomic_lock = 0;
+ return originalValue;
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelaxed(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+// Test and set for pointers
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetOrdered(T *expectedValue, T *newValue)
+{
+ bool returnValue = false;
+ int count = 0;
+ while (qt_atomic_tasb(&qt_atomic_lock) == 0)
+ qt_atomic_yield(&count);
+ if (_q_value == expectedValue) {
+ _q_value = newValue;
+ returnValue = true;
+ }
+ qt_atomic_lock = 0;
+ return returnValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelaxed(T *expectedValue, T *newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetAcquire(T *expectedValue, T *newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValue, T *newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+// Fetch and store for pointers
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreOrdered(T *newValue)
+{
+ int count = 0;
+ while (qt_atomic_tasb(&qt_atomic_lock) == 0)
+ qt_atomic_yield(&count);
+ T *originalValue = _q_value;
+ _q_value = newValue;
+ qt_atomic_lock = 0;
+ return originalValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelaxed(T *newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreAcquire(T *newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelease(T *newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+// Fetch and add for pointers
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddOrdered(qptrdiff valueToAdd)
+{
+ int count = 0;
+ while (qt_atomic_tasb(&qt_atomic_lock) == 0)
+ qt_atomic_yield(&count);
+ T *originalValue = (_q_value);
+ _q_value += valueToAdd;
+ qt_atomic_lock = 0;
+ return originalValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelaxed(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddAcquire(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelease(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+QT_END_NAMESPACE
+
+QT_END_HEADER
+
+#endif // QATOMIC_SH_H
diff --git a/src/corelib/arch/qatomic_sh4a.h b/src/corelib/arch/qatomic_sh4a.h
new file mode 100644
index 0000000000..88fd4d451d
--- /dev/null
+++ b/src/corelib/arch/qatomic_sh4a.h
@@ -0,0 +1,537 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QATOMIC_SH4A_H
+#define QATOMIC_SH4A_H
+
+QT_BEGIN_HEADER
+
+QT_BEGIN_NAMESPACE
+
+#define Q_ATOMIC_INT_REFERENCE_COUNTING_IS_ALWAYS_NATIVE
+
+inline bool QBasicAtomicInt::isReferenceCountingNative()
+{ return true; }
+inline bool QBasicAtomicInt::isReferenceCountingWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_TEST_AND_SET_IS_ALWAYS_NATIVE
+
+inline bool QBasicAtomicInt::isTestAndSetNative()
+{ return true; }
+inline bool QBasicAtomicInt::isTestAndSetWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_FETCH_AND_STORE_IS_ALWAYS_NATIVE
+
+inline bool QBasicAtomicInt::isFetchAndStoreNative()
+{ return true; }
+inline bool QBasicAtomicInt::isFetchAndStoreWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_FETCH_AND_ADD_IS_ALWAYS_NATIVE
+
+inline bool QBasicAtomicInt::isFetchAndAddNative()
+{ return true; }
+inline bool QBasicAtomicInt::isFetchAndAddWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_TEST_AND_SET_IS_ALWAYS_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_ALWAYS_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_ADD_IS_ALWAYS_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddWaitFree()
+{ return false; }
+
+#if !defined(Q_CC_GNU)
+# error "SH-4A support has not been added for this compiler"
+#else
+
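+// SH-4A provides load-linked/store-conditional through movli.l/movco.l
+// (which must operate on r0); "bf 0b" retries until the conditional store
+// succeeds. A trailing synco serves as the acquire barrier, a leading synco
+// as the release barrier.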
+inline bool QBasicAtomicInt::ref()
+{
+ register int newValue asm("r0");
+ asm volatile("0:\n"
+ "movli.l @%[_q_value], %[newValue]\n"
+ "add #1,%[newValue]\n"
+ "movco.l %[newValue], @%[_q_value]\n"
+ "bf 0b\n"
+ : [newValue] "=&r" (newValue),
+ "+m" (_q_value)
+ : [_q_value] "r" (&_q_value)
+ : "cc", "memory");
+ return newValue != 0;
+}
+
+inline bool QBasicAtomicInt::deref()
+{
+ register int newValue asm("r0");
+ asm volatile("0:\n"
+ "movli.l @%[_q_value], %[newValue]\n"
+ "add #-1,%[newValue]\n"
+ "movco.l %[newValue], @%[_q_value]\n"
+ "bf 0b\n"
+ : [newValue] "=&r" (newValue),
+ "+m" (_q_value)
+ : [_q_value] "r" (&_q_value)
+ : "cc", "memory");
+ return newValue != 0;
+}
+
+inline bool QBasicAtomicInt::testAndSetRelaxed(int expectedValue, int newValue)
+{
+ register int result;
+ asm volatile("0:\n"
+ "movli.l @%[_q_value], r0\n"
+ "xor %[expectedValue], r0\n"
+ "cmp/eq #0, r0\n"
+ "bf/s 0f\n"
+ "mov r0, %[result]\n"
+ "mov %[newValue], r0\n"
+ "movco.l r0, @%[_q_value]\n"
+ "bf 0b\n"
+ "0:\n"
+ : [result] "=&r" (result),
+ "+m" (_q_value)
+ : [_q_value] "r" (&_q_value),
+ [expectedValue] "r" (expectedValue),
+ [newValue] "r" (newValue)
+ : "r0", "cc", "memory");
+ return result == 0;
+}
+
+inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
+{
+ register int result;
+ asm volatile("0:\n"
+ "movli.l @%[_q_value], r0\n"
+ "xor %[expectedValue], r0\n"
+ "cmp/eq #0, r0\n"
+ "bf/s 0f\n"
+ "mov r0, %[result]\n"
+ "mov %[newValue], r0\n"
+ "movco.l r0, @%[_q_value]\n"
+ "bf 0b\n"
+ "synco\n"
+ "0:\n"
+ : [result] "=&r" (result),
+ "+m" (_q_value)
+ : [_q_value] "r" (&_q_value),
+ [expectedValue] "r" (expectedValue),
+ [newValue] "r" (newValue)
+ : "r0", "cc", "memory");
+ return result == 0;
+}
+
+inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
+{
+ register int result;
+ asm volatile("synco\n"
+ "0:\n"
+ "movli.l @%[_q_value], r0\n"
+ "xor %[expectedValue], r0\n"
+ "cmp/eq #0, r0\n"
+ "bf/s 0f\n"
+ "mov r0, %[result]\n"
+ "mov %[newValue], r0\n"
+ "movco.l r0, @%[_q_value]\n"
+ "bf 0b\n"
+ "0:\n"
+ : [result] "=&r" (result),
+ "+m" (_q_value)
+ : [_q_value] "r" (&_q_value),
+ [expectedValue] "r" (expectedValue),
+ [newValue] "r" (newValue)
+ : "r0", "cc", "memory");
+ return result == 0;
+}
+
+inline bool QBasicAtomicInt::testAndSetOrdered(int expectedValue, int newValue)
+{
+ return testAndSetAcquire(expectedValue, newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelaxed(int newValue)
+{
+ register int originalValue;
+ asm volatile("0:\n"
+ "movli.l @%[_q_value], r0\n"
+ "mov r0, %[originalValue]\n"
+ "mov %[newValue], r0\n"
+ "movco.l r0, @%[_q_value]\n"
+ "bf 0b\n"
+ : [originalValue] "=&r" (originalValue),
+ "+m" (_q_value)
+ : [_q_value] "r" (&_q_value),
+ [newValue] "r" (newValue)
+ : "r0", "cc", "memory");
+ return originalValue;
+}
+
+inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
+{
+ register int originalValue;
+ asm volatile("0:\n"
+ "movli.l @%[_q_value], r0\n"
+ "mov r0, %[originalValue]\n"
+ "mov %[newValue], r0\n"
+ "movco.l r0, @%[_q_value]\n"
+ "bf 0b\n"
+ "synco\n"
+ : [originalValue] "=&r" (originalValue),
+ "+m" (_q_value)
+ : [_q_value] "r" (&_q_value),
+ [newValue] "r" (newValue)
+ : "r0", "cc", "memory");
+ return originalValue;
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelease(int newValue)
+{
+ register int originalValue;
+ asm volatile("synco\n"
+ "0:\n"
+ "movli.l @%[_q_value], r0\n"
+ "mov r0, %[originalValue]\n"
+ "mov %[newValue], r0\n"
+ "movco.l r0, @%[_q_value]\n"
+ "bf 0b\n"
+ : [originalValue] "=&r" (originalValue),
+ "+m" (_q_value)
+ : [_q_value] "r" (&_q_value),
+ [newValue] "r" (newValue)
+ : "r0", "cc", "memory");
+ return originalValue;
+}
+
+inline int QBasicAtomicInt::fetchAndStoreOrdered(int newValue)
+{
+ return fetchAndStoreAcquire(newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelaxed(int valueToAdd)
+{
+ register int originalValue;
+ asm volatile("0:\n"
+ "movli.l @%[_q_value], r0\n"
+ "mov r0, %[originalValue]\n"
+ "add %[valueToAdd], r0\n"
+ "movco.l r0, @%[_q_value]\n"
+ "bf 0b\n"
+ : [originalValue] "=&r" (originalValue),
+ "+m" (_q_value)
+ : [_q_value] "r" (&_q_value),
+ [valueToAdd] "r" (valueToAdd)
+ : "r0", "cc", "memory");
+ return originalValue;
+}
+
+inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
+{
+ register int originalValue;
+ asm volatile("0:\n"
+ "movli.l @%[_q_value], r0\n"
+ "mov r0, %[originalValue]\n"
+ "add %[valueToAdd], r0\n"
+ "movco.l r0, @%[_q_value]\n"
+ "bf 0b\n"
+ "synco\n"
+ : [originalValue] "=&r" (originalValue),
+ "+m" (_q_value)
+ : [_q_value] "r" (&_q_value),
+ [valueToAdd] "r" (valueToAdd)
+ : "r0", "cc", "memory");
+ return originalValue;
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
+{
+ register int originalValue;
+ asm volatile("synco\n"
+ "0:\n"
+ "movli.l @%[_q_value], r0\n"
+ "mov r0, %[originalValue]\n"
+ "add %[valueToAdd], r0\n"
+ "movco.l r0, @%[_q_value]\n"
+ "bf 0b\n"
+ : [originalValue] "=&r" (originalValue),
+ "+m" (_q_value)
+ : [_q_value] "r" (&_q_value),
+ [valueToAdd] "r" (valueToAdd)
+ : "r0", "cc", "memory");
+ return originalValue;
+}
+
+inline int QBasicAtomicInt::fetchAndAddOrdered(int valueToAdd)
+{
+ return fetchAndAddAcquire(valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelaxed(T *expectedValue, T *newValue)
+{
+ register T *result;
+ asm volatile("0:\n"
+ "movli.l @%[_q_value], r0\n"
+ "xor %[expectedValue], r0\n"
+ "cmp/eq #0, r0\n"
+ "bf/s 0f\n"
+ "mov r0, %[result]\n"
+ "mov %[newValue], r0\n"
+ "movco.l r0, @%[_q_value]\n"
+ "bf 0b\n"
+ "0:\n"
+ : [result] "=&r" (result),
+ "+m" (_q_value)
+ : [_q_value] "r" (&_q_value),
+ [expectedValue] "r" (expectedValue),
+ [newValue] "r" (newValue)
+ : "r0", "cc", "memory");
+ return result == 0;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetAcquire(T *expectedValue, T *newValue)
+{
+ register T *result;
+ asm volatile("0:\n"
+ "movli.l @%[_q_value], r0\n"
+ "xor %[expectedValue], r0\n"
+ "cmp/eq #0, r0\n"
+ "bf/s 0f\n"
+ "mov r0, %[result]\n"
+ "mov %[newValue], r0\n"
+ "movco.l r0, @%[_q_value]\n"
+ "bf 0b\n"
+ "synco\n"
+ "0:\n"
+ : [result] "=&r" (result),
+ "+m" (_q_value)
+ : [_q_value] "r" (&_q_value),
+ [expectedValue] "r" (expectedValue),
+ [newValue] "r" (newValue)
+ : "r0", "cc", "memory");
+ return result == 0;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValue, T *newValue)
+{
+ register T *result;
+ asm volatile("synco\n"
+ "0:\n"
+ "movli.l @%[_q_value], r0\n"
+ "xor %[expectedValue], r0\n"
+ "cmp/eq #0, r0\n"
+ "bf/s 0f\n"
+ "mov r0, %[result]\n"
+ "mov %[newValue], r0\n"
+ "movco.l r0, @%[_q_value]\n"
+ "bf 0b\n"
+ "0:\n"
+ : [result] "=&r" (result),
+ "+m" (_q_value)
+ : [_q_value] "r" (&_q_value),
+ [expectedValue] "r" (expectedValue),
+ [newValue] "r" (newValue)
+ : "r0", "cc", "memory");
+ return result == 0;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetOrdered(T *expectedValue, T *newValue)
+{
+ return testAndSetAcquire(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelaxed(T *newValue)
+{
+ register T *originalValue;
+ asm volatile("0:\n"
+ "movli.l @%[_q_value], r0\n"
+ "mov r0, %[originalValue]\n"
+ "mov %[newValue], r0\n"
+ "movco.l r0, @%[_q_value]\n"
+ "bf 0b\n"
+ : [originalValue] "=&r" (originalValue),
+ "+m" (_q_value)
+ : [_q_value] "r" (&_q_value),
+ [newValue] "r" (newValue)
+ : "r0", "cc", "memory");
+ return originalValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreAcquire(T *newValue)
+{
+ register T *originalValue;
+ asm volatile("0:\n"
+ "movli.l @%[_q_value], r0\n"
+ "mov r0, %[originalValue]\n"
+ "mov %[newValue], r0\n"
+ "movco.l r0, @%[_q_value]\n"
+ "bf 0b\n"
+ "synco\n"
+ : [originalValue] "=&r" (originalValue),
+ "+m" (_q_value)
+ : [_q_value] "r" (&_q_value),
+ [newValue] "r" (newValue)
+ : "r0", "cc", "memory");
+ return originalValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelease(T *newValue)
+{
+ register T *originalValue;
+ asm volatile("synco\n"
+ "0:\n"
+ "movli.l @%[_q_value], r0\n"
+ "mov r0, %[originalValue]\n"
+ "mov %[newValue], r0\n"
+ "movco.l r0, @%[_q_value]\n"
+ "bf 0b\n"
+ : [originalValue] "=&r" (originalValue),
+ "+m" (_q_value)
+ : [_q_value] "r" (&_q_value),
+ [newValue] "r" (newValue)
+ : "r0", "cc", "memory");
+ return originalValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreOrdered(T *newValue)
+{
+ return fetchAndStoreAcquire(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelaxed(qptrdiff valueToAdd)
+{
+ register T *originalValue;
+ asm volatile("0:\n"
+ "movli.l @%[_q_value], r0\n"
+ "mov r0, %[originalValue]\n"
+ "add %[valueToAdd], r0\n"
+ "movco.l r0, @%[_q_value]\n"
+ "bf 0b\n"
+ : [originalValue] "=&r" (originalValue),
+ "+m" (_q_value)
+ : [_q_value] "r" (&_q_value),
+ [valueToAdd] "r" (valueToAdd * sizeof(T))
+ : "r0", "cc", "memory");
+ return originalValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddAcquire(qptrdiff valueToAdd)
+{
+ register T *originalValue;
+ asm volatile("0:\n"
+ "movli.l @%[_q_value], r0\n"
+ "mov r0, %[originalValue]\n"
+ "add %[valueToAdd], r0\n"
+ "movco.l r0, @%[_q_value]\n"
+ "bf 0b\n"
+ "synco\n"
+ : [originalValue] "=&r" (originalValue),
+ "+m" (_q_value)
+ : [_q_value] "r" (&_q_value),
+ [valueToAdd] "r" (valueToAdd * sizeof(T))
+ : "r0", "cc", "memory");
+ return originalValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelease(qptrdiff valueToAdd)
+{
+ register T *originalValue;
+ asm volatile("synco\n"
+ "0:\n"
+ "movli.l @%[_q_value], r0\n"
+ "mov r0, %[originalValue]\n"
+ "add %[valueToAdd], r0\n"
+ "movco.l r0, @%[_q_value]\n"
+ "bf 0b\n"
+ : [originalValue] "=&r" (originalValue),
+ "+m" (_q_value)
+ : [_q_value] "r" (&_q_value),
+ [valueToAdd] "r" (valueToAdd * sizeof(T))
+ : "r0", "cc", "memory");
+ return originalValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddOrdered(qptrdiff valueToAdd)
+{
+ return fetchAndAddAcquire(valueToAdd);
+}
+
+#endif // Q_CC_GNU
+
+QT_END_NAMESPACE
+
+QT_END_HEADER
+
+#endif // QATOMIC_SH4A_H
diff --git a/src/corelib/arch/qatomic_sparc.h b/src/corelib/arch/qatomic_sparc.h
new file mode 100644
index 0000000000..a89a5f3fe5
--- /dev/null
+++ b/src/corelib/arch/qatomic_sparc.h
@@ -0,0 +1,525 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QATOMIC_SPARC_H
+#define QATOMIC_SPARC_H
+
+QT_BEGIN_HEADER
+
+QT_BEGIN_NAMESPACE
+
+#if defined(_LP64)
+
+#define Q_ATOMIC_INT_REFERENCE_COUNTING_IS_ALWAYS_NATIVE
+
+inline bool QBasicAtomicInt::isReferenceCountingNative()
+{ return true; }
+inline bool QBasicAtomicInt::isReferenceCountingWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_TEST_AND_SET_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT_TEST_AND_SET_IS_WAIT_FREE
+
+inline bool QBasicAtomicInt::isTestAndSetNative()
+{ return true; }
+inline bool QBasicAtomicInt::isTestAndSetWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_INT_FETCH_AND_STORE_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT_FETCH_AND_STORE_IS_WAIT_FREE
+
+inline bool QBasicAtomicInt::isFetchAndStoreNative()
+{ return true; }
+inline bool QBasicAtomicInt::isFetchAndStoreWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_INT_FETCH_AND_ADD_IS_ALWAYS_NATIVE
+
+inline bool QBasicAtomicInt::isFetchAndAddNative()
+{ return true; }
+inline bool QBasicAtomicInt::isFetchAndAddWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_TEST_AND_SET_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_POINTER_TEST_AND_SET_IS_WAIT_FREE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_WAIT_FREE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_ADD_IS_ALWAYS_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddWaitFree()
+{ return false; }
+
+extern "C" {
+ Q_CORE_EXPORT int q_atomic_increment(volatile int *ptr);
+ Q_CORE_EXPORT int q_atomic_decrement(volatile int *ptr);
+
+ Q_CORE_EXPORT int q_atomic_test_and_set_int(volatile int *ptr, int expected, int newval);
+ Q_CORE_EXPORT int q_atomic_test_and_set_acquire_int(volatile int *ptr,
+ int expected,
+ int newval);
+ Q_CORE_EXPORT int q_atomic_test_and_set_release_int(volatile int *ptr,
+ int expected,
+ int newval);
+
+ Q_CORE_EXPORT int q_atomic_set_int(volatile int *ptr, int newval);
+ Q_CORE_EXPORT int q_atomic_fetch_and_store_acquire_int(volatile int *ptr, int newval);
+ Q_CORE_EXPORT int q_atomic_fetch_and_store_release_int(volatile int *ptr, int newval);
+
+ Q_CORE_EXPORT int q_atomic_fetch_and_add_int(volatile int *ptr, int value);
+ Q_CORE_EXPORT int q_atomic_fetch_and_add_acquire_int(volatile int *ptr, int value);
+ Q_CORE_EXPORT int q_atomic_fetch_and_add_release_int(volatile int *ptr, int value);
+
+ Q_CORE_EXPORT int q_atomic_test_and_set_ptr(volatile void *ptr, void *expected, void *newval);
+ Q_CORE_EXPORT int q_atomic_test_and_set_acquire_ptr(volatile void *ptr,
+ void *expected,
+ void *newval);
+ Q_CORE_EXPORT int q_atomic_test_and_set_release_ptr(volatile void *ptr,
+ void *expected,
+ void *newval);
+
+ Q_CORE_EXPORT void *q_atomic_set_ptr(volatile void *ptr, void *newval);
+ Q_CORE_EXPORT void *q_atomic_fetch_and_store_acquire_ptr(volatile void *ptr, void *newval);
+ Q_CORE_EXPORT void *q_atomic_fetch_and_store_release_ptr(volatile void *ptr, void *newval);
+
+ Q_CORE_EXPORT void *q_atomic_fetch_and_add_ptr(volatile void *ptr, int value);
+ Q_CORE_EXPORT void *q_atomic_fetch_and_add_acquire_ptr(volatile void *ptr, int value);
+ Q_CORE_EXPORT void *q_atomic_fetch_and_add_release_ptr(volatile void *ptr, int value);
+}
+
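+// ref()/deref() rely on fetchAndAddRelaxed() returning the value the counter
+// held before the addition: the new value is non-zero exactly when the old
+// value was not -1 (ref) or not 1 (deref).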
+inline bool QBasicAtomicInt::ref()
+{
+ return fetchAndAddRelaxed(1) != -1;
+}
+
+inline bool QBasicAtomicInt::deref()
+{
+ return fetchAndAddRelaxed(-1) != 1;
+}
+
+inline bool QBasicAtomicInt::testAndSetRelaxed(int expectedValue, int newValue)
+{
+ return q_atomic_test_and_set_int(&_q_value, expectedValue, newValue) != 0;
+}
+
+inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
+{
+ return q_atomic_test_and_set_acquire_int(&_q_value, expectedValue, newValue) != 0;
+}
+
+inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
+{
+ return q_atomic_test_and_set_release_int(&_q_value, expectedValue, newValue) != 0;
+}
+
+inline bool QBasicAtomicInt::testAndSetOrdered(int expectedValue, int newValue)
+{
+ return q_atomic_test_and_set_acquire_int(&_q_value, expectedValue, newValue) != 0;
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelaxed(int newValue)
+{
+ return q_atomic_set_int(&_q_value, newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
+{
+ return q_atomic_fetch_and_store_acquire_int(&_q_value, newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelease(int newValue)
+{
+ return q_atomic_fetch_and_store_release_int(&_q_value, newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreOrdered(int newValue)
+{
+ return q_atomic_fetch_and_store_acquire_int(&_q_value, newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelaxed(int newValue)
+{
+ return q_atomic_fetch_and_add_int(&_q_value, newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndAddAcquire(int newValue)
+{
+ return q_atomic_fetch_and_add_acquire_int(&_q_value, newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelease(int newValue)
+{
+ return q_atomic_fetch_and_add_release_int(&_q_value, newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndAddOrdered(int newValue)
+{
+ return q_atomic_fetch_and_add_acquire_int(&_q_value, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelaxed(T *expectedValue, T *newValue)
+{
+ return q_atomic_test_and_set_ptr(&_q_value, expectedValue, newValue) != 0;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetAcquire(T *expectedValue, T *newValue)
+{
+ return q_atomic_test_and_set_acquire_ptr(&_q_value, expectedValue, newValue) != 0;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValue, T *newValue)
+{
+ return q_atomic_test_and_set_release_ptr(&_q_value, expectedValue, newValue) != 0;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetOrdered(T *expectedValue, T *newValue)
+{
+ return q_atomic_test_and_set_acquire_ptr(&_q_value, expectedValue, newValue) != 0;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelaxed(T *newValue)
+{
+ return reinterpret_cast<T *>(q_atomic_set_ptr(&_q_value, newValue));
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreAcquire(T *newValue)
+{
+ return reinterpret_cast<T *>(q_atomic_fetch_and_store_acquire_ptr(&_q_value, newValue));
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelease(T *newValue)
+{
+ return reinterpret_cast<T *>(q_atomic_fetch_and_store_release_ptr(&_q_value, newValue));
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreOrdered(T *newValue)
+{
+ return reinterpret_cast<T *>(q_atomic_fetch_and_store_acquire_ptr(&_q_value, newValue));
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelaxed(qptrdiff valueToAdd)
+{
+ return reinterpret_cast<T *>(q_atomic_fetch_and_add_ptr(&_q_value, valueToAdd * sizeof(T)));
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE
+T *QBasicAtomicPointer<T>::fetchAndAddAcquire(qptrdiff valueToAdd)
+{
+ return reinterpret_cast<T *>(q_atomic_fetch_and_add_acquire_ptr(&_q_value, valueToAdd * sizeof(T)));
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelease(qptrdiff valueToAdd)
+{
+ return reinterpret_cast<T *>(q_atomic_fetch_and_add_release_ptr(&_q_value, valueToAdd * sizeof(T)));
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddOrdered(qptrdiff valueToAdd)
+{
+ return reinterpret_cast<T *>(q_atomic_fetch_and_add_acquire_ptr(&_q_value, valueToAdd * sizeof(T)));
+}
+
+#else
+
+#define Q_ATOMIC_INT_REFERENCE_COUNTING_IS_NOT_NATIVE
+
+inline bool QBasicAtomicInt::isReferenceCountingNative()
+{ return false; }
+inline bool QBasicAtomicInt::isReferenceCountingWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_TEST_AND_SET_IS_NOT_NATIVE
+
+inline bool QBasicAtomicInt::isTestAndSetNative()
+{ return false; }
+inline bool QBasicAtomicInt::isTestAndSetWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_FETCH_AND_STORE_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT_FETCH_AND_STORE_IS_WAIT_FREE
+
+inline bool QBasicAtomicInt::isFetchAndStoreNative()
+{ return true; }
+inline bool QBasicAtomicInt::isFetchAndStoreWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_INT_FETCH_AND_ADD_IS_NOT_NATIVE
+
+inline bool QBasicAtomicInt::isFetchAndAddNative()
+{ return false; }
+inline bool QBasicAtomicInt::isFetchAndAddWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_TEST_AND_SET_IS_NOT_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetNative()
+{ return false; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_WAIT_FREE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_ADD_IS_NOT_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddNative()
+{ return false; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddWaitFree()
+{ return false; }
+
+extern "C" {
+ Q_CORE_EXPORT int q_atomic_lock_int(volatile int *addr);
+ Q_CORE_EXPORT int q_atomic_lock_ptr(volatile void *addr);
+ Q_CORE_EXPORT void q_atomic_unlock(volatile void *addr, int value);
+ Q_CORE_EXPORT int q_atomic_set_int(volatile int *ptr, int newval);
+ Q_CORE_EXPORT void *q_atomic_set_ptr(volatile void *ptr, void *newval);
+} // extern "C"
+
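+// The lock-based fallback protocol: q_atomic_lock_int()/q_atomic_lock_ptr()
+// acquire a lock associated with the address and return its current contents;
+// q_atomic_unlock() stores the supplied value and releases that lock. Every
+// operation below is a plain read-modify-write performed under this lock.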
+inline bool QBasicAtomicInt::ref()
+{
+ const int val = q_atomic_lock_int(&_q_value);
+ q_atomic_unlock(&_q_value, val + 1);
+ return val != -1;
+}
+
+inline bool QBasicAtomicInt::deref()
+{
+ const int val = q_atomic_lock_int(&_q_value);
+ q_atomic_unlock(&_q_value, val - 1);
+ return val != 1;
+}
+
+inline bool QBasicAtomicInt::testAndSetOrdered(int expectedValue, int newValue)
+{
+ int val = q_atomic_lock_int(&_q_value);
+ if (val == expectedValue) {
+ q_atomic_unlock(&_q_value, newValue);
+ return true;
+ }
+ q_atomic_unlock(&_q_value, val);
+ return false;
+}
+
+inline bool QBasicAtomicInt::testAndSetRelaxed(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreOrdered(int newValue)
+{
+ return q_atomic_set_int(&_q_value, newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelaxed(int newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelease(int newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndAddOrdered(int valueToAdd)
+{
+ const int originalValue = q_atomic_lock_int(&_q_value);
+ q_atomic_unlock(&_q_value, originalValue + valueToAdd);
+ return originalValue;
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelaxed(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetOrdered(T *expectedValue, T *newValue)
+{
+ T *val = reinterpret_cast<T *>(q_atomic_lock_ptr(&_q_value));
+ if (val == expectedValue) {
+ q_atomic_unlock(&_q_value, reinterpret_cast<int>(newValue));
+ return true;
+ }
+ q_atomic_unlock(&_q_value, reinterpret_cast<int>(val));
+ return false;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelaxed(T *expectedValue, T *newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetAcquire(T *expectedValue, T *newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValue, T *newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreOrdered(T *newValue)
+{
+ return reinterpret_cast<T *>(q_atomic_set_ptr(&_q_value, newValue));
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelaxed(T *newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreAcquire(T *newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelease(T *newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddOrdered(qptrdiff valueToAdd)
+{
+ T *originalValue = reinterpret_cast<T *>(q_atomic_lock_ptr(&_q_value));
+ q_atomic_unlock(&_q_value, int(originalValue + valueToAdd));
+ return originalValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelaxed(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddAcquire(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelease(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+#endif // _LP64
+
+QT_END_NAMESPACE
+
+QT_END_HEADER
+
+#endif // QATOMIC_SPARC_H
diff --git a/src/corelib/arch/qatomic_symbian.h b/src/corelib/arch/qatomic_symbian.h
new file mode 100644
index 0000000000..c4cd301218
--- /dev/null
+++ b/src/corelib/arch/qatomic_symbian.h
@@ -0,0 +1,313 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QATOMIC_SYMBIAN_H
+#define QATOMIC_SYMBIAN_H
+
+#include <QtCore/qglobal.h>
+#include <e32std.h>
+
+QT_BEGIN_HEADER
+
+QT_BEGIN_NAMESPACE
+
+QT_MODULE(Core)
+
+#define Q_ATOMIC_INT_REFERENCE_COUNTING_IS_SOMETIMES_NATIVE
+
+inline bool QBasicAtomicInt::isReferenceCountingWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_TEST_AND_SET_IS_SOMETIMES_NATIVE
+
+inline bool QBasicAtomicInt::isTestAndSetWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_FETCH_AND_STORE_IS_SOMETIMES_NATIVE
+
+inline bool QBasicAtomicInt::isFetchAndStoreWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_FETCH_AND_ADD_IS_SOMETIMES_NATIVE
+
+inline bool QBasicAtomicInt::isFetchAndAddWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_TEST_AND_SET_IS_SOMETIMES_NATIVE
+
+Q_CORE_EXPORT bool QBasicAtomicPointer_isTestAndSetNative();
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetNative()
+{ return QBasicAtomicPointer_isTestAndSetNative(); }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_SOMETIMES_NATIVE
+
+Q_CORE_EXPORT bool QBasicAtomicPointer_isFetchAndStoreNative();
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreNative()
+{ return QBasicAtomicPointer_isFetchAndStoreNative(); }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_ADD_IS_SOMETIMES_NATIVE
+
+Q_CORE_EXPORT bool QBasicAtomicPointer_isFetchAndAddNative();
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddNative()
+{ return QBasicAtomicPointer_isFetchAndAddNative(); }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddWaitFree()
+{ return false; }
+
+Q_CORE_EXPORT bool QBasicAtomicInt_testAndSetOrdered(volatile int *, int, int);
+Q_CORE_EXPORT int QBasicAtomicInt_fetchAndStoreOrdered(volatile int *, int);
+Q_CORE_EXPORT int QBasicAtomicInt_fetchAndAddOrdered(volatile int *, int);
+Q_CORE_EXPORT bool QBasicAtomicInt_testAndSetRelaxed(volatile int *, int, int);
+Q_CORE_EXPORT int QBasicAtomicInt_fetchAndStoreRelaxed(volatile int *, int);
+Q_CORE_EXPORT int QBasicAtomicInt_fetchAndAddRelaxed(volatile int *, int);
+Q_CORE_EXPORT bool QBasicAtomicInt_testAndSetAcquire(volatile int *, int, int);
+Q_CORE_EXPORT int QBasicAtomicInt_fetchAndStoreAcquire(volatile int *, int);
+Q_CORE_EXPORT int QBasicAtomicInt_fetchAndAddAcquire(volatile int *, int);
+Q_CORE_EXPORT bool QBasicAtomicInt_testAndSetRelease(volatile int *, int, int);
+Q_CORE_EXPORT int QBasicAtomicInt_fetchAndStoreRelease(volatile int *, int);
+Q_CORE_EXPORT int QBasicAtomicInt_fetchAndAddRelease(volatile int *, int);
+
+Q_CORE_EXPORT bool QBasicAtomicPointer_testAndSetOrdered(void * volatile *, void *, void *);
+Q_CORE_EXPORT void *QBasicAtomicPointer_fetchAndStoreOrdered(void * volatile *, void *);
+Q_CORE_EXPORT void *QBasicAtomicPointer_fetchAndAddOrdered(void * volatile *, qptrdiff);
+Q_CORE_EXPORT bool QBasicAtomicPointer_testAndSetRelaxed(void * volatile *, void *, void *);
+Q_CORE_EXPORT void *QBasicAtomicPointer_fetchAndStoreRelaxed(void * volatile *, void *);
+Q_CORE_EXPORT void *QBasicAtomicPointer_fetchAndAddRelaxed(void * volatile *, qptrdiff);
+Q_CORE_EXPORT bool QBasicAtomicPointer_testAndSetAcquire(void * volatile *, void *, void *);
+Q_CORE_EXPORT void *QBasicAtomicPointer_fetchAndStoreAcquire(void * volatile *, void *);
+Q_CORE_EXPORT void *QBasicAtomicPointer_fetchAndAddAcquire(void * volatile *, qptrdiff);
+Q_CORE_EXPORT bool QBasicAtomicPointer_testAndSetRelease(void * volatile *, void *, void *);
+Q_CORE_EXPORT void *QBasicAtomicPointer_fetchAndStoreRelease(void * volatile *, void *);
+Q_CORE_EXPORT void *QBasicAtomicPointer_fetchAndAddRelease(void * volatile *, qptrdiff);
+
+// Reference counting
+
+// LockedInc() and LockedDec() are machine-coded for ARMv6 (and future-proof).
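+// As used below, both calls return the value the counter held before the
+// operation, so ref()/deref() compare that original value against -1 and 1
+// to report whether the new value is non-zero.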
+inline bool QBasicAtomicInt::ref()
+{
+ int original = User::LockedInc((TInt&)_q_value);
+ return original != -1;
+}
+
+inline bool QBasicAtomicInt::deref()
+{
+ int original = User::LockedDec((TInt&)_q_value);
+ return original != 1;
+}
+
+// Test and set for integers
+
+inline bool QBasicAtomicInt::testAndSetOrdered(int expectedValue, int newValue)
+{
+ return QBasicAtomicInt_testAndSetOrdered(&_q_value, expectedValue, newValue);
+}
+
+inline bool QBasicAtomicInt::testAndSetRelaxed(int expectedValue, int newValue)
+{
+ return QBasicAtomicInt_testAndSetRelaxed(&_q_value, expectedValue, newValue);
+}
+
+inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
+{
+ return QBasicAtomicInt_testAndSetAcquire(&_q_value, expectedValue, newValue);
+}
+
+inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
+{
+ return QBasicAtomicInt_testAndSetRelease(&_q_value, expectedValue, newValue);
+}
+
+// Fetch and store for integers
+
+inline int QBasicAtomicInt::fetchAndStoreOrdered(int newValue)
+{
+ return QBasicAtomicInt_fetchAndStoreOrdered(&_q_value, newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelaxed(int newValue)
+{
+ return QBasicAtomicInt_fetchAndStoreRelaxed(&_q_value, newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
+{
+ return QBasicAtomicInt_fetchAndStoreAcquire(&_q_value, newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelease(int newValue)
+{
+ return QBasicAtomicInt_fetchAndStoreRelease(&_q_value, newValue);
+}
+
+// Fetch and add for integers
+
+inline int QBasicAtomicInt::fetchAndAddOrdered(int valueToAdd)
+{
+ return QBasicAtomicInt_fetchAndAddOrdered(&_q_value, valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelaxed(int valueToAdd)
+{
+ return QBasicAtomicInt_fetchAndAddRelaxed(&_q_value, valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
+{
+ return QBasicAtomicInt_fetchAndAddAcquire(&_q_value, valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
+{
+ return QBasicAtomicInt_fetchAndAddRelease(&_q_value, valueToAdd);
+}
+
+// Test and set for pointers
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetOrdered(T *expectedValue, T *newValue)
+{
+ return QBasicAtomicPointer_testAndSetOrdered(reinterpret_cast<void * volatile *>(&_q_value),
+ expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelaxed(T *expectedValue, T *newValue)
+{
+ return QBasicAtomicPointer_testAndSetRelaxed(reinterpret_cast<void * volatile *>(&_q_value),
+ expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetAcquire(T *expectedValue, T *newValue)
+{
+ return QBasicAtomicPointer_testAndSetAcquire(reinterpret_cast<void * volatile *>(&_q_value),
+ expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValue, T *newValue)
+{
+ return QBasicAtomicPointer_testAndSetRelease(reinterpret_cast<void * volatile *>(&_q_value),
+ expectedValue, newValue);
+}
+
+// Fetch and store for pointers
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreOrdered(T *newValue)
+{
+ return static_cast<T*>(QBasicAtomicPointer_fetchAndStoreOrdered(
+ reinterpret_cast<void * volatile *>(&_q_value)
+ , newValue));
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelaxed(T *newValue)
+{
+ return static_cast<T*>(QBasicAtomicPointer_fetchAndStoreRelaxed(
+ reinterpret_cast<void * volatile *>(&_q_value)
+ , newValue));
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreAcquire(T *newValue)
+{
+ return static_cast<T*>(QBasicAtomicPointer_fetchAndStoreAcquire(
+ reinterpret_cast<void * volatile *>(&_q_value)
+ , newValue));
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelease(T *newValue)
+{
+ return static_cast<T*>(QBasicAtomicPointer_fetchAndStoreRelease(
+ reinterpret_cast<void * volatile *>(&_q_value)
+ , newValue));
+}
+
+// Fetch and add for pointers
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddOrdered(qptrdiff valueToAdd)
+{
+ return static_cast<T*>(QBasicAtomicPointer_fetchAndAddOrdered(
+ reinterpret_cast<void * volatile *>(&_q_value),
+ valueToAdd * sizeof(T)));
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelaxed(qptrdiff valueToAdd)
+{
+ return static_cast<T*>(QBasicAtomicPointer_fetchAndAddRelaxed(
+ reinterpret_cast<void * volatile *>(&_q_value),
+ valueToAdd * sizeof(T)));
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddAcquire(qptrdiff valueToAdd)
+{
+ return static_cast<T*>(QBasicAtomicPointer_fetchAndAddAcquire(
+ reinterpret_cast<void * volatile *>(&_q_value),
+ valueToAdd * sizeof(T)));
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelease(qptrdiff valueToAdd)
+{
+ return static_cast<T*>(QBasicAtomicPointer_fetchAndAddRelease(
+ reinterpret_cast<void * volatile *>(&_q_value),
+ valueToAdd * sizeof(T)));
+}
+
+QT_END_NAMESPACE
+
+QT_END_HEADER
+
+#endif // QATOMIC_SYMBIAN_H
diff --git a/src/corelib/arch/qatomic_vxworks.h b/src/corelib/arch/qatomic_vxworks.h
new file mode 100644
index 0000000000..9386d19ece
--- /dev/null
+++ b/src/corelib/arch/qatomic_vxworks.h
@@ -0,0 +1,318 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QATOMIC_VXWORKS_H
+#define QATOMIC_VXWORKS_H
+
+QT_BEGIN_HEADER
+
+#if defined(__ppc)
+# include <QtCore/qatomic_powerpc.h>
+#else // generic implementation with taskLock()
+
+#if 0
+// we don't want to include the system header here for two function prototypes,
+// because it pulls in a _lot_ of stuff that pollutes the global namespace
+# include <vxWorksCommon.h>
+# include <taskLib.h>
+#else
+extern "C" int taskLock();
+extern "C" int taskUnlock();
+#endif
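+
+// The generic fallback below serializes each operation by disabling task
+// preemption (taskLock()/taskUnlock()) around a plain read-modify-write,
+// which is why none of the operations are reported as native or wait-free.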
+
+
+
+QT_BEGIN_NAMESPACE
+
+#define Q_ATOMIC_INT_REFERENCE_COUNTING_IS_NOT_NATIVE
+
+inline bool QBasicAtomicInt::isReferenceCountingNative()
+{ return false; }
+inline bool QBasicAtomicInt::isReferenceCountingWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_TEST_AND_SET_IS_NOT_NATIVE
+
+inline bool QBasicAtomicInt::isTestAndSetNative()
+{ return false; }
+inline bool QBasicAtomicInt::isTestAndSetWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_FETCH_AND_STORE_IS_NOT_NATIVE
+
+inline bool QBasicAtomicInt::isFetchAndStoreNative()
+{ return false; }
+inline bool QBasicAtomicInt::isFetchAndStoreWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_INT_FETCH_AND_ADD_IS_NOT_NATIVE
+
+inline bool QBasicAtomicInt::isFetchAndAddNative()
+{ return false; }
+inline bool QBasicAtomicInt::isFetchAndAddWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_TEST_AND_SET_IS_NOT_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetNative()
+{ return false; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_NOT_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreNative()
+{ return false; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreWaitFree()
+{ return false; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_ADD_IS_NOT_NATIVE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddNative()
+{ return false; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddWaitFree()
+{ return false; }
+
+// Reference counting
+
+inline bool QBasicAtomicInt::ref()
+{
+ taskLock();
+ bool ret = (++_q_value != 0);
+ taskUnlock();
+ return ret;
+}
+
+inline bool QBasicAtomicInt::deref()
+{
+ taskLock();
+ bool ret = (--_q_value != 0);
+ taskUnlock();
+ return ret;
+}
+
+// Test-and-set for integers
+
+inline bool QBasicAtomicInt::testAndSetOrdered(int expectedValue, int newValue)
+{
+ taskLock();
+ if (_q_value == expectedValue) {
+ _q_value = newValue;
+ taskUnlock();
+ return true;
+ }
+ taskUnlock();
+ return false;
+}
+
+inline bool QBasicAtomicInt::testAndSetRelaxed(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+// Fetch-and-store for integers
+
+inline int QBasicAtomicInt::fetchAndStoreOrdered(int newValue)
+{
+ taskLock();
+ int returnValue = _q_value;
+ _q_value = newValue;
+ taskUnlock();
+ return returnValue;
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelaxed(int newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelease(int newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+// Fetch-and-add for integers
+
+inline int QBasicAtomicInt::fetchAndAddOrdered(int valueToAdd)
+{
+ taskLock();
+ int originalValue = _q_value;
+ _q_value += valueToAdd;
+ taskUnlock();
+ return originalValue;
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelaxed(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+// Test and set for pointers
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetOrdered(T *expectedValue, T *newValue)
+{
+ taskLock();
+ if (_q_value == expectedValue) {
+ _q_value = newValue;
+ taskUnlock();
+ return true;
+ }
+ taskUnlock();
+ return false;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelaxed(T *expectedValue, T *newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetAcquire(T *expectedValue, T *newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValue, T *newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+// Fetch and store for pointers
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreOrdered(T *newValue)
+{
+ taskLock();
+ T *returnValue = (_q_value);
+ _q_value = newValue;
+ taskUnlock();
+ return returnValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelaxed(T *newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreAcquire(T *newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelease(T *newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+// Fetch and add for pointers
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddOrdered(qptrdiff valueToAdd)
+{
+ taskLock();
+ T *returnValue = (_q_value);
+ _q_value += valueToAdd;
+ taskUnlock();
+ return returnValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelaxed(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddAcquire(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelease(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+QT_END_NAMESPACE
+
+#endif // generic implementation with taskLock()
+
+QT_END_HEADER
+
+#endif // QATOMIC_VXWORKS_H
diff --git a/src/corelib/arch/qatomic_windows.h b/src/corelib/arch/qatomic_windows.h
new file mode 100644
index 0000000000..538c00cff1
--- /dev/null
+++ b/src/corelib/arch/qatomic_windows.h
@@ -0,0 +1,496 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QATOMIC_WINDOWS_H
+#define QATOMIC_WINDOWS_H
+
+#ifndef Q_CC_MSVC
+
+// MinGW and other GCC platforms get inline assembly
+
+# ifdef __i386__
+# include "QtCore/qatomic_i386.h"
+# else
+# include "QtCore/qatomic_x86_64.h"
+# endif
+
+#else // Q_CC_MSVC
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifndef Q_OS_WINCE
+
+// use compiler intrinsics for all atomic functions
+# define QT_INTERLOCKED_PREFIX _
+# define QT_INTERLOCKED_PROTOTYPE __cdecl
+# define QT_INTERLOCKED_DECLARE_PROTOTYPES
+# define QT_INTERLOCKED_INTRINSIC
+
+#else // Q_OS_WINCE
+
+# if _WIN32_WCE < 0x600 && defined(_X86_)
+// For x86 Windows CE, include winbase.h to pick up the inline functions that
+// override the regular definitions inside coredll.dll.
+// Although the original versions of Increment/Decrement could still be used,
+// the other functions are not exported at all.
+# include <winbase.h>
+
+// It's safer to remove the volatile and let the compiler add it as needed.
+# define QT_INTERLOCKED_NO_VOLATILE
+
+# else // _WIN32_WCE >= 0x600 || !_X86_
+
+# define QT_INTERLOCKED_PROTOTYPE __cdecl
+# define QT_INTERLOCKED_DECLARE_PROTOTYPES
+
+# if _WIN32_WCE >= 0x600
+# if defined(_X86_)
+# define QT_INTERLOCKED_PREFIX _
+# define QT_INTERLOCKED_INTRINSIC
+# endif
+# else
+# define QT_INTERLOCKED_NO_VOLATILE
+# endif
+
+# endif // _WIN32_WCE >= 0x600 || !_X86_
+
+#endif // Q_OS_WINCE
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// Prototype declaration
+
+#define QT_INTERLOCKED_CONCAT_I(prefix, suffix) \
+ prefix ## suffix
+#define QT_INTERLOCKED_CONCAT(prefix, suffix) \
+ QT_INTERLOCKED_CONCAT_I(prefix, suffix)
+
+// MSVC intrinsics prefix function names with an underscore. Also, if platform
+// SDK headers have been included, the Interlocked names may be defined as
+// macros.
+// To avoid double underscores, we paste the prefix with Interlocked first and
+// then the remainder of the function name.
+#define QT_INTERLOCKED_FUNCTION(name) \
+ QT_INTERLOCKED_CONCAT( \
+ QT_INTERLOCKED_CONCAT(QT_INTERLOCKED_PREFIX, Interlocked), name)
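+
+// For example, with QT_INTERLOCKED_PREFIX defined as _ (the intrinsic case),
+// QT_INTERLOCKED_FUNCTION(Increment) expands to _InterlockedIncrement; with an
+// empty prefix it expands to the plain InterlockedIncrement name.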
+
+#ifdef QT_INTERLOCKED_NO_VOLATILE
+# define QT_INTERLOCKED_VOLATILE
+# define QT_INTERLOCKED_REMOVE_VOLATILE(a) qt_interlocked_remove_volatile(a)
+#else
+# define QT_INTERLOCKED_VOLATILE volatile
+# define QT_INTERLOCKED_REMOVE_VOLATILE(a) a
+#endif
+
+#ifndef QT_INTERLOCKED_PREFIX
+#define QT_INTERLOCKED_PREFIX
+#endif
+
+#ifndef QT_INTERLOCKED_PROTOTYPE
+#define QT_INTERLOCKED_PROTOTYPE
+#endif
+
+#ifdef QT_INTERLOCKED_DECLARE_PROTOTYPES
+#undef QT_INTERLOCKED_DECLARE_PROTOTYPES
+
+extern "C" {
+
+ long QT_INTERLOCKED_PROTOTYPE QT_INTERLOCKED_FUNCTION( Increment )(long QT_INTERLOCKED_VOLATILE *);
+ long QT_INTERLOCKED_PROTOTYPE QT_INTERLOCKED_FUNCTION( Decrement )(long QT_INTERLOCKED_VOLATILE *);
+ long QT_INTERLOCKED_PROTOTYPE QT_INTERLOCKED_FUNCTION( CompareExchange )(long QT_INTERLOCKED_VOLATILE *, long, long);
+ long QT_INTERLOCKED_PROTOTYPE QT_INTERLOCKED_FUNCTION( Exchange )(long QT_INTERLOCKED_VOLATILE *, long);
+ long QT_INTERLOCKED_PROTOTYPE QT_INTERLOCKED_FUNCTION( ExchangeAdd )(long QT_INTERLOCKED_VOLATILE *, long);
+
+# if !defined(Q_OS_WINCE) && !defined(__i386__) && !defined(_M_IX86)
+ void * QT_INTERLOCKED_FUNCTION( CompareExchangePointer )(void * QT_INTERLOCKED_VOLATILE *, void *, void *);
+ void * QT_INTERLOCKED_FUNCTION( ExchangePointer )(void * QT_INTERLOCKED_VOLATILE *, void *);
+ __int64 QT_INTERLOCKED_FUNCTION( ExchangeAdd64 )(__int64 QT_INTERLOCKED_VOLATILE *, __int64);
+# endif
+
+}
+
+#endif // QT_INTERLOCKED_DECLARE_PROTOTYPES
+
+#undef QT_INTERLOCKED_PROTOTYPE
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef QT_INTERLOCKED_INTRINSIC
+#undef QT_INTERLOCKED_INTRINSIC
+
+# pragma intrinsic (_InterlockedIncrement)
+# pragma intrinsic (_InterlockedDecrement)
+# pragma intrinsic (_InterlockedExchange)
+# pragma intrinsic (_InterlockedCompareExchange)
+# pragma intrinsic (_InterlockedExchangeAdd)
+
+# if !defined(Q_OS_WINCE) && !defined(_M_IX86)
+# pragma intrinsic (_InterlockedCompareExchangePointer)
+# pragma intrinsic (_InterlockedExchangePointer)
+# pragma intrinsic (_InterlockedExchangeAdd64)
+# endif
+
+#endif // QT_INTERLOCKED_INTRINSIC
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// Interlocked* replacement macros
+
+#define QT_INTERLOCKED_INCREMENT(value) \
+ QT_INTERLOCKED_FUNCTION( Increment )( \
+ QT_INTERLOCKED_REMOVE_VOLATILE( value ) )
+
+#define QT_INTERLOCKED_DECREMENT(value) \
+ QT_INTERLOCKED_FUNCTION( Decrement )( \
+ QT_INTERLOCKED_REMOVE_VOLATILE( value ) )
+
+#define QT_INTERLOCKED_COMPARE_EXCHANGE(value, newValue, expectedValue) \
+ QT_INTERLOCKED_FUNCTION( CompareExchange )( \
+ QT_INTERLOCKED_REMOVE_VOLATILE( value ), \
+ newValue, \
+ expectedValue )
+
+#define QT_INTERLOCKED_EXCHANGE(value, newValue) \
+ QT_INTERLOCKED_FUNCTION( Exchange )( \
+ QT_INTERLOCKED_REMOVE_VOLATILE( value ), \
+ newValue )
+
+#define QT_INTERLOCKED_EXCHANGE_ADD(value, valueToAdd) \
+ QT_INTERLOCKED_FUNCTION( ExchangeAdd )( \
+ QT_INTERLOCKED_REMOVE_VOLATILE( value ), \
+ valueToAdd )
+
+#if defined(Q_OS_WINCE) || defined(__i386__) || defined(_M_IX86)
+
+# define QT_INTERLOCKED_COMPARE_EXCHANGE_POINTER(value, newValue, expectedValue) \
+ reinterpret_cast<void *>( \
+ QT_INTERLOCKED_FUNCTION( CompareExchange )( \
+ QT_INTERLOCKED_REMOVE_VOLATILE( value ## _integral ), \
+ (long)( newValue ), \
+ (long)( expectedValue ) ))
+
+# define QT_INTERLOCKED_EXCHANGE_POINTER(value, newValue) \
+ QT_INTERLOCKED_FUNCTION( Exchange )( \
+ QT_INTERLOCKED_REMOVE_VOLATILE( value ## _integral ), \
+ (quintptr)( newValue ) )
+
+# define QT_INTERLOCKED_EXCHANGE_ADD_POINTER(value, valueToAdd) \
+ QT_INTERLOCKED_FUNCTION( ExchangeAdd )( \
+ QT_INTERLOCKED_REMOVE_VOLATILE( value ## _integral ), \
+ valueToAdd )
+
+#else // !defined(Q_OS_WINCE) && !defined(__i386__) && !defined(_M_IX86)
+
+# define QT_INTERLOCKED_COMPARE_EXCHANGE_POINTER(value, newValue, expectedValue) \
+ QT_INTERLOCKED_FUNCTION( CompareExchangePointer )( \
+ reinterpret_cast<void * QT_INTERLOCKED_VOLATILE *>( QT_INTERLOCKED_REMOVE_VOLATILE( value ) ), \
+ newValue, \
+ expectedValue )
+
+# define QT_INTERLOCKED_EXCHANGE_POINTER(value, newValue) \
+ QT_INTERLOCKED_FUNCTION( ExchangePointer )( \
+ reinterpret_cast<void * QT_INTERLOCKED_VOLATILE *>( QT_INTERLOCKED_REMOVE_VOLATILE( value ) ), \
+ newValue )
+
+# define QT_INTERLOCKED_EXCHANGE_ADD_POINTER(value, valueToAdd) \
+ QT_INTERLOCKED_FUNCTION( ExchangeAdd64 )( \
+ QT_INTERLOCKED_REMOVE_VOLATILE( value ## _integral ), \
+ valueToAdd )
+
+#endif // !defined(Q_OS_WINCE) && !defined(__i386__) && !defined(_M_IX86)
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+QT_BEGIN_HEADER
+
+QT_BEGIN_NAMESPACE
+
+#define Q_ATOMIC_INT_REFERENCE_COUNTING_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT_REFERENCE_COUNTING_IS_WAIT_FREE
+
+inline bool QBasicAtomicInt::isReferenceCountingNative()
+{ return true; }
+inline bool QBasicAtomicInt::isReferenceCountingWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_INT_TEST_AND_SET_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT_TEST_AND_SET_IS_WAIT_FREE
+
+inline bool QBasicAtomicInt::isTestAndSetNative()
+{ return true; }
+inline bool QBasicAtomicInt::isTestAndSetWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_INT_FETCH_AND_STORE_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT_FETCH_AND_STORE_IS_WAIT_FREE
+
+inline bool QBasicAtomicInt::isFetchAndStoreNative()
+{ return true; }
+inline bool QBasicAtomicInt::isFetchAndStoreWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_INT_FETCH_AND_ADD_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT_FETCH_AND_ADD_IS_WAIT_FREE
+
+inline bool QBasicAtomicInt::isFetchAndAddNative()
+{ return true; }
+inline bool QBasicAtomicInt::isFetchAndAddWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_POINTER_TEST_AND_SET_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_POINTER_TEST_AND_SET_IS_WAIT_FREE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_WAIT_FREE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_ADD_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_POINTER_FETCH_AND_ADD_IS_WAIT_FREE
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddWaitFree()
+{ return true; }
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef QT_INTERLOCKED_NO_VOLATILE
+template <class T>
+Q_INLINE_TEMPLATE T *qt_interlocked_remove_volatile(T volatile *t)
+{
+ return const_cast<T *>(t);
+}
+#endif // QT_INTERLOCKED_NO_VOLATILE
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+inline bool QBasicAtomicInt::ref()
+{
+ return QT_INTERLOCKED_INCREMENT(&_q_value) != 0;
+}
+
+inline bool QBasicAtomicInt::deref()
+{
+ return QT_INTERLOCKED_DECREMENT(&_q_value) != 0;
+}
+
+inline bool QBasicAtomicInt::testAndSetOrdered(int expectedValue, int newValue)
+{
+ return QT_INTERLOCKED_COMPARE_EXCHANGE(&_q_value, newValue, expectedValue)
+ == expectedValue;
+}
+
+inline int QBasicAtomicInt::fetchAndStoreOrdered(int newValue)
+{
+ return QT_INTERLOCKED_EXCHANGE(&_q_value, newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndAddOrdered(int valueToAdd)
+{
+ return QT_INTERLOCKED_EXCHANGE_ADD(&_q_value, valueToAdd);
+}
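+
+// The Interlocked* operations are treated as fully ordered here, so the
+// relaxed, acquire and release variants further below simply forward to these
+// ordered implementations.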
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetOrdered(T *expectedValue, T *newValue)
+{
+ return QT_INTERLOCKED_COMPARE_EXCHANGE_POINTER(&_q_value, newValue, expectedValue)
+ == expectedValue;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreOrdered(T* newValue)
+{
+ return reinterpret_cast<T *>(
+ QT_INTERLOCKED_EXCHANGE_POINTER(&_q_value, newValue));
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddOrdered(qptrdiff valueToAdd)
+{
+ return reinterpret_cast<T *>(
+ QT_INTERLOCKED_EXCHANGE_ADD_POINTER(&_q_value, valueToAdd * sizeof(T)));
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+inline bool QBasicAtomicInt::testAndSetRelaxed(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelaxed(int newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelease(int newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelaxed(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelaxed(T *expectedValue, T *newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetAcquire(T *expectedValue, T *newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValue, T *newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelaxed(T *newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreAcquire(T *newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelease(T *newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelaxed(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddAcquire(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelease(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// Cleanup
+
+#undef QT_INTERLOCKED_CONCAT_I
+#undef QT_INTERLOCKED_CONCAT
+#undef QT_INTERLOCKED_FUNCTION
+#undef QT_INTERLOCKED_PREFIX
+
+#undef QT_INTERLOCKED_NO_VOLATILE
+#undef QT_INTERLOCKED_VOLATILE
+#undef QT_INTERLOCKED_REMOVE_VOLATILE
+
+#undef QT_INTERLOCKED_INCREMENT
+#undef QT_INTERLOCKED_DECREMENT
+#undef QT_INTERLOCKED_COMPARE_EXCHANGE
+#undef QT_INTERLOCKED_EXCHANGE
+#undef QT_INTERLOCKED_EXCHANGE_ADD
+#undef QT_INTERLOCKED_COMPARE_EXCHANGE_POINTER
+#undef QT_INTERLOCKED_EXCHANGE_POINTER
+#undef QT_INTERLOCKED_EXCHANGE_ADD_POINTER
+
+QT_END_NAMESPACE
+
+QT_END_HEADER
+
+#endif // Q_CC_MSVC
+
+#endif // QATOMIC_WINDOWS_H
diff --git a/src/corelib/arch/qatomic_windowsce.h b/src/corelib/arch/qatomic_windowsce.h
new file mode 100644
index 0000000000..25696f9626
--- /dev/null
+++ b/src/corelib/arch/qatomic_windowsce.h
@@ -0,0 +1,56 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef WINDOWSCE_QATOMIC_H
+#define WINDOWSCE_QATOMIC_H
+
+#include <QtCore/qglobal.h>
+QT_BEGIN_HEADER
+
+#if defined(QT_ARCH_WINDOWSCE)
+#include "QtCore/qatomic_windows.h"
+#endif
+
+QT_END_HEADER
+
+#endif // WINDOWSCE_QATOMIC_H
+
+
diff --git a/src/corelib/arch/qatomic_x86_64.h b/src/corelib/arch/qatomic_x86_64.h
new file mode 100644
index 0000000000..9303f191ce
--- /dev/null
+++ b/src/corelib/arch/qatomic_x86_64.h
@@ -0,0 +1,363 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QATOMIC_X86_64_H
+#define QATOMIC_X86_64_H
+
+QT_BEGIN_HEADER
+
+QT_BEGIN_NAMESPACE
+
+#define Q_ATOMIC_INT_REFERENCE_COUNTING_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT_REFERENCE_COUNTING_IS_WAIT_FREE
+
+inline bool QBasicAtomicInt::isReferenceCountingNative()
+{ return true; }
+inline bool QBasicAtomicInt::isReferenceCountingWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_INT_TEST_AND_SET_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT_TEST_AND_SET_IS_WAIT_FREE
+
+inline bool QBasicAtomicInt::isTestAndSetNative()
+{ return true; }
+inline bool QBasicAtomicInt::isTestAndSetWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_INT_FETCH_AND_STORE_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT_FETCH_AND_STORE_IS_WAIT_FREE
+
+inline bool QBasicAtomicInt::isFetchAndStoreNative()
+{ return true; }
+inline bool QBasicAtomicInt::isFetchAndStoreWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_INT_FETCH_AND_ADD_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_INT_FETCH_AND_ADD_IS_WAIT_FREE
+
+inline bool QBasicAtomicInt::isFetchAndAddNative()
+{ return true; }
+inline bool QBasicAtomicInt::isFetchAndAddWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_POINTER_TEST_AND_SET_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_POINTER_TEST_AND_SET_IS_WAIT_FREE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isTestAndSetWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_POINTER_FETCH_AND_STORE_IS_WAIT_FREE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndStoreWaitFree()
+{ return true; }
+
+#define Q_ATOMIC_POINTER_FETCH_AND_ADD_IS_ALWAYS_NATIVE
+#define Q_ATOMIC_POINTER_FETCH_AND_ADD_IS_WAIT_FREE
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddNative()
+{ return true; }
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::isFetchAndAddWaitFree()
+{ return true; }
+
+#if defined(Q_CC_GNU) || defined(Q_CC_INTEL)
+
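+// The lock-prefixed incl/decl below adjust the reference count atomically;
+// setne captures the zero flag, so ref()/deref() return whether the new count is non-zero.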
+inline bool QBasicAtomicInt::ref()
+{
+ unsigned char ret;
+ asm volatile("lock\n"
+ "incl %0\n"
+ "setne %1"
+ : "=m" (_q_value), "=qm" (ret)
+ : "m" (_q_value)
+ : "memory");
+ return ret != 0;
+}
+
+inline bool QBasicAtomicInt::deref()
+{
+ unsigned char ret;
+ asm volatile("lock\n"
+ "decl %0\n"
+ "setne %1"
+ : "=m" (_q_value), "=qm" (ret)
+ : "m" (_q_value)
+ : "memory");
+ return ret != 0;
+}
+
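+// cmpxchg compares _q_value with expectedValue (held in eax) and stores newValue on a
+// match; sete reports whether the exchange took place.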
+inline bool QBasicAtomicInt::testAndSetOrdered(int expectedValue, int newValue)
+{
+ unsigned char ret;
+ asm volatile("lock\n"
+ "cmpxchgl %3,%2\n"
+ "sete %1\n"
+ : "=a" (newValue), "=qm" (ret), "+m" (_q_value)
+ : "r" (newValue), "0" (expectedValue)
+ : "memory");
+ return ret != 0;
+}
+
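+// xchg with a memory operand is implicitly locked, so no lock prefix is needed;
+// the previous value comes back in the register.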
+inline int QBasicAtomicInt::fetchAndStoreOrdered(int newValue)
+{
+ asm volatile("xchgl %0,%1"
+ : "=r" (newValue), "+m" (_q_value)
+ : "0" (newValue)
+ : "memory");
+ return newValue;
+}
+
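+// lock xadd adds valueToAdd to _q_value and leaves the previous value in the
+// register, which is returned.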
+inline int QBasicAtomicInt::fetchAndAddOrdered(int valueToAdd)
+{
+ asm volatile("lock\n"
+ "xaddl %0,%1"
+ : "=r" (valueToAdd), "+m" (_q_value)
+ : "0" (valueToAdd)
+ : "memory");
+ return valueToAdd;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetOrdered(T *expectedValue, T *newValue)
+{
+ unsigned char ret;
+ asm volatile("lock\n"
+ "cmpxchgq %3,%2\n"
+ "sete %1\n"
+ : "=a" (newValue), "=qm" (ret), "+m" (_q_value)
+ : "r" (newValue), "0" (expectedValue)
+ : "memory");
+ return ret != 0;
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreOrdered(T *newValue)
+{
+ asm volatile("xchgq %0,%1"
+ : "=r" (newValue), "+m" (_q_value)
+ : "0" (newValue)
+ : "memory");
+ return newValue;
+}
+
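+// The addend is scaled by sizeof(T) so callers pass an element count, matching pointer
+// arithmetic; xaddq leaves the previous pointer value in the register.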
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddOrdered(qptrdiff valueToAdd)
+{
+ asm volatile("lock\n"
+ "xaddq %0,%1"
+ : "=r" (valueToAdd), "+m" (_q_value)
+ : "0" (valueToAdd * sizeof(T))
+ : "memory");
+ return reinterpret_cast<T *>(valueToAdd);
+}
+
+#else // !Q_CC_INTEL && !Q_CC_GNU
+
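+// Fallback for compilers without GCC-style inline assembly: the operations are
+// provided by these out-of-line helpers exported from QtCore.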
+extern "C" {
+ Q_CORE_EXPORT int q_atomic_test_and_set_int(volatile int *ptr, int expected, int newval);
+ Q_CORE_EXPORT int q_atomic_test_and_set_ptr(volatile void *ptr, void *expected, void *newval);
+ Q_CORE_EXPORT int q_atomic_increment(volatile int *ptr);
+ Q_CORE_EXPORT int q_atomic_decrement(volatile int *ptr);
+ Q_CORE_EXPORT int q_atomic_set_int(volatile int *ptr, int newval);
+ Q_CORE_EXPORT void *q_atomic_set_ptr(volatile void *ptr, void *newval);
+ Q_CORE_EXPORT int q_atomic_fetch_and_add_int(volatile int *ptr, int value);
+ Q_CORE_EXPORT void *q_atomic_fetch_and_add_ptr(volatile void *ptr, qptrdiff value);
+} // extern "C"
+
+inline bool QBasicAtomicInt::ref()
+{
+ return q_atomic_increment(&_q_value) != 0;
+}
+
+inline bool QBasicAtomicInt::deref()
+{
+ return q_atomic_decrement(&_q_value) != 0;
+}
+
+inline bool QBasicAtomicInt::testAndSetOrdered(int expected, int newval)
+{
+ return q_atomic_test_and_set_int(&_q_value, expected, newval) != 0;
+}
+
+inline int QBasicAtomicInt::fetchAndStoreOrdered(int newval)
+{
+ return q_atomic_set_int(&_q_value, newval);
+}
+
+inline int QBasicAtomicInt::fetchAndAddOrdered(int aValue)
+{
+ return q_atomic_fetch_and_add_int(&_q_value, aValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetOrdered(T *expectedValue, T *newValue)
+{
+ return q_atomic_test_and_set_ptr(&_q_value, expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreOrdered(T *newValue)
+{
+ return reinterpret_cast<T *>(q_atomic_set_ptr(&_q_value, newValue));
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddOrdered(qptrdiff valueToAdd)
+{
+ return reinterpret_cast<T *>(q_atomic_fetch_and_add_ptr(&_q_value, valueToAdd * sizeof(T)));
+}
+
+#endif // Q_CC_GNU || Q_CC_INTEL
+
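+// On x86-64, lock-prefixed read-modify-write instructions are full barriers, so the
+// relaxed, acquire and release variants simply forward to the fully ordered implementations.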
+inline bool QBasicAtomicInt::testAndSetRelaxed(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+inline bool QBasicAtomicInt::testAndSetAcquire(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+inline bool QBasicAtomicInt::testAndSetRelease(int expectedValue, int newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelaxed(int newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreAcquire(int newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndStoreRelease(int newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelaxed(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddAcquire(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+inline int QBasicAtomicInt::fetchAndAddRelease(int valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelaxed(T *expectedValue, T *newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetAcquire(T *expectedValue, T *newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE bool QBasicAtomicPointer<T>::testAndSetRelease(T *expectedValue, T *newValue)
+{
+ return testAndSetOrdered(expectedValue, newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelaxed(T *newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreAcquire(T *newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndStoreRelease(T *newValue)
+{
+ return fetchAndStoreOrdered(newValue);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelaxed(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddAcquire(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+template <typename T>
+Q_INLINE_TEMPLATE T *QBasicAtomicPointer<T>::fetchAndAddRelease(qptrdiff valueToAdd)
+{
+ return fetchAndAddOrdered(valueToAdd);
+}
+
+QT_END_NAMESPACE
+
+QT_END_HEADER
+
+#endif // QATOMIC_X86_64_H
diff --git a/src/corelib/arch/s390/arch.pri b/src/corelib/arch/s390/arch.pri
new file mode 100644
index 0000000000..45cc57824b
--- /dev/null
+++ b/src/corelib/arch/s390/arch.pri
@@ -0,0 +1,3 @@
+#
+# S390 architecture
+#
diff --git a/src/corelib/arch/sh/arch.pri b/src/corelib/arch/sh/arch.pri
new file mode 100644
index 0000000000..67fbb16c10
--- /dev/null
+++ b/src/corelib/arch/sh/arch.pri
@@ -0,0 +1,4 @@
+#
+# SH (Renesas SuperH) architecture
+#
+SOURCES += $$QT_ARCH_CPP/qatomic_sh.cpp
diff --git a/src/corelib/arch/sh/qatomic_sh.cpp b/src/corelib/arch/sh/qatomic_sh.cpp
new file mode 100644
index 0000000000..a27702010a
--- /dev/null
+++ b/src/corelib/arch/sh/qatomic_sh.cpp
@@ -0,0 +1,72 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include <QtCore/qglobal.h>
+
+#include <unistd.h>
+#ifdef _POSIX_PRIORITY_SCHEDULING
+# include <sched.h>
+#endif
+#include <time.h>
+
+QT_BEGIN_NAMESPACE
+
+QT_USE_NAMESPACE
+
+Q_CORE_EXPORT volatile char qt_atomic_lock = 0;
+
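+// Back-off helper for the qt_atomic_lock spinlock: yield to the scheduler for the
+// first 50 attempts (when POSIX priority scheduling is available), then sleep for
+// roughly 2 ms and reset the counter.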
+Q_CORE_EXPORT void qt_atomic_yield(int *count)
+{
+#ifdef _POSIX_PRIORITY_SCHEDULING
+ if((*count)++ < 50) {
+ sched_yield();
+ } else
+#endif
+ {
+ struct timespec tm;
+ tm.tv_sec = 0;
+ tm.tv_nsec = 2000001;
+ nanosleep(&tm, NULL);
+ *count = 0;
+ }
+}
+
+QT_END_NAMESPACE
diff --git a/src/corelib/arch/sh4a/arch.pri b/src/corelib/arch/sh4a/arch.pri
new file mode 100644
index 0000000000..19d5e040a9
--- /dev/null
+++ b/src/corelib/arch/sh4a/arch.pri
@@ -0,0 +1,3 @@
+#
+# SH-4A (Renesas SuperH) architecture
+#
diff --git a/src/corelib/arch/sparc/arch.pri b/src/corelib/arch/sparc/arch.pri
new file mode 100644
index 0000000000..9bb3a888e8
--- /dev/null
+++ b/src/corelib/arch/sparc/arch.pri
@@ -0,0 +1,10 @@
+#
+# SPARC architecture
+#
+*-64* {
+ SOURCES += $$QT_ARCH_CPP/qatomic64.s
+}
+else {
+ SOURCES += $$QT_ARCH_CPP/qatomic32.s \
+ $$QT_ARCH_CPP/qatomic_sparc.cpp
+}
diff --git a/src/corelib/arch/sparc/qatomic32.s b/src/corelib/arch/sparc/qatomic32.s
new file mode 100644
index 0000000000..a242add4bc
--- /dev/null
+++ b/src/corelib/arch/sparc/qatomic32.s
@@ -0,0 +1,103 @@
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+!!
+!! Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+!! All rights reserved.
+!! Contact: Nokia Corporation (qt-info@nokia.com)
+!!
+!! This file is part of the QtCore module of the Qt Toolkit.
+!!
+!! $QT_BEGIN_LICENSE:LGPL$
+!! No Commercial Usage
+!! This file contains pre-release code and may not be distributed.
+!! You may use this file in accordance with the terms and conditions
+!! contained in the Technology Preview License Agreement accompanying
+!! this package.
+!!
+!! GNU Lesser General Public License Usage
+!! Alternatively, this file may be used under the terms of the GNU Lesser
+!! General Public License version 2.1 as published by the Free Software
+!! Foundation and appearing in the file LICENSE.LGPL included in the
+!! packaging of this file. Please review the following information to
+!! ensure the GNU Lesser General Public License version 2.1 requirements
+!! will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+!!
+!! In addition, as a special exception, Nokia gives you certain additional
+!! rights. These rights are described in the Nokia Qt LGPL Exception
+!! version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+!!
+!! If you have questions regarding the use of this file, please contact
+!! Nokia at qt-info@nokia.com.
+!!
+!!
+!!
+!!
+!!
+!!
+!!
+!!
+!! $QT_END_LICENSE$
+!!
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ .section ".text"
+
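+! 32-bit SPARC implementation: atomics are built on a spinlock. The trylock routines
+! swap a sentinel (INT_MIN for ints, -1 for pointers) into the lock word and return its
+! previous contents; q_atomic_unlock issues stbar before storing the release value.
+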
+ .align 4
+ .type q_atomic_trylock_int,#function
+ .global q_atomic_trylock_int
+q_atomic_trylock_int:
+ sethi %hi(-2147483648),%o2
+ swap [%o0],%o2
+ retl
+ mov %o2,%o0
+ .size q_atomic_trylock_int,.-q_atomic_trylock_int
+
+
+
+
+ .align 4
+ .type q_atomic_trylock_ptr,#function
+ .global q_atomic_trylock_ptr
+q_atomic_trylock_ptr:
+ mov -1, %o2
+ swap [%o0], %o2
+ retl
+ mov %o2, %o0
+ .size q_atomic_trylock_ptr,.-q_atomic_trylock_ptr
+
+
+
+
+ .align 4
+ .type q_atomic_unlock,#function
+ .global q_atomic_unlock
+q_atomic_unlock:
+ stbar
+ retl
+ st %o1,[%o0]
+ .size q_atomic_unlock,.-q_atomic_unlock
+
+
+
+
+ .align 4
+ .type q_atomic_set_int,#function
+ .global q_atomic_set_int
+q_atomic_set_int:
+ swap [%o0],%o1
+ stbar
+ retl
+ mov %o1,%o0
+ .size q_atomic_set_int,.-q_atomic_set_int
+
+
+
+
+ .align 4
+ .type q_atomic_set_ptr,#function
+ .global q_atomic_set_ptr
+q_atomic_set_ptr:
+ swap [%o0],%o1
+ stbar
+ retl
+ mov %o1,%o0
+ .size q_atomic_set_ptr,.-q_atomic_set_ptr
+
diff --git a/src/corelib/arch/sparc/qatomic64.s b/src/corelib/arch/sparc/qatomic64.s
new file mode 100644
index 0000000000..f826311da4
--- /dev/null
+++ b/src/corelib/arch/sparc/qatomic64.s
@@ -0,0 +1,327 @@
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+!!
+!! Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+!! All rights reserved.
+!! Contact: Nokia Corporation (qt-info@nokia.com)
+!!
+!! This file is part of the QtCore module of the Qt Toolkit.
+!!
+!! $QT_BEGIN_LICENSE:LGPL$
+!! No Commercial Usage
+!! This file contains pre-release code and may not be distributed.
+!! You may use this file in accordance with the terms and conditions
+!! contained in the Technology Preview License Agreement accompanying
+!! this package.
+!!
+!! GNU Lesser General Public License Usage
+!! Alternatively, this file may be used under the terms of the GNU Lesser
+!! General Public License version 2.1 as published by the Free Software
+!! Foundation and appearing in the file LICENSE.LGPL included in the
+!! packaging of this file. Please review the following information to
+!! ensure the GNU Lesser General Public License version 2.1 requirements
+!! will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+!!
+!! In addition, as a special exception, Nokia gives you certain additional
+!! rights. These rights are described in the Nokia Qt LGPL Exception
+!! version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+!!
+!! If you have questions regarding the use of this file, please contact
+!! Nokia at qt-info@nokia.com.
+!!
+!!
+!!
+!!
+!!
+!!
+!!
+!!
+!! $QT_END_LICENSE$
+!!
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ .section ".text"
+
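+! 64-bit (SPARC V9) implementation built on cas/casx compare-and-swap.
+! Acquire variants issue membar #LoadLoad|#LoadStore after the operation;
+! release variants issue membar #LoadStore|#StoreStore before it.
+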
+ .align 4
+ .type q_atomic_test_and_set_int,#function
+ .global q_atomic_test_and_set_int
+q_atomic_test_and_set_int:
+ cas [%o0],%o1,%o2
+ cmp %o1,%o2
+ clr %o0
+ retl
+ move %icc,1,%o0
+ .size q_atomic_test_and_set_int,.-q_atomic_test_and_set_int
+
+ .align 4
+ .type q_atomic_test_and_set_acquire_int,#function
+ .global q_atomic_test_and_set_acquire_int
+q_atomic_test_and_set_acquire_int:
+ cas [%o0],%o1,%o2
+ cmp %o1,%o2
+ clr %o0
+ membar #LoadLoad | #LoadStore
+ retl
+ move %icc,1,%o0
+ .size q_atomic_test_and_set_acquire_int,.-q_atomic_test_and_set_acquire_int
+
+ .align 4
+ .type q_atomic_test_and_set_release_int,#function
+ .global q_atomic_test_and_set_release_int
+q_atomic_test_and_set_release_int:
+ membar #LoadStore | #StoreStore
+ cas [%o0],%o1,%o2
+ cmp %o1,%o2
+ clr %o0
+ retl
+ move %icc,1,%o0
+ .size q_atomic_test_and_set_release_int,.-q_atomic_test_and_set_release_int
+
+ .align 4
+ .type q_atomic_test_and_set_ptr,#function
+ .global q_atomic_test_and_set_ptr
+q_atomic_test_and_set_ptr:
+ casx [%o0],%o1,%o2
+ cmp %o1,%o2
+ clr %o0
+ retl
+ move %icc,1,%o0
+ .size q_atomic_test_and_set_ptr,.-q_atomic_test_and_set_ptr
+
+ .align 4
+ .type q_atomic_increment,#function
+ .global q_atomic_increment
+q_atomic_increment:
+q_atomic_increment_retry:
+ ld [%o0],%o3
+ add %o3,1,%o4
+ cas [%o0],%o3,%o4
+ cmp %o3,%o4
+ bne q_atomic_increment_retry
+ nop
+ cmp %o4,-1
+ clr %o0
+ retl
+ movne %icc,1,%o0
+ .size q_atomic_increment,.-q_atomic_increment
+
+ .align 4
+ .type q_atomic_decrement,#function
+ .global q_atomic_decrement
+q_atomic_decrement:
+q_atomic_decrement_retry:
+ ld [%o0],%o3
+ add %o3,-1,%o4
+ cas [%o0],%o3,%o4
+ cmp %o3,%o4
+ bne q_atomic_decrement_retry
+ nop
+ cmp %o4,1
+ clr %o0
+ retl
+ movne %icc,1,%o0
+ .size q_atomic_decrement,.-q_atomic_decrement
+
+ .align 4
+ .type q_atomic_set_int,#function
+ .global q_atomic_set_int
+q_atomic_set_int:
+q_atomic_set_int_retry:
+ ld [%o0],%o2
+ cas [%o0],%o2,%o1
+ cmp %o2,%o1
+ bne q_atomic_set_int_retry
+ nop
+ retl
+ mov %o1,%o0
+ .size q_atomic_set_int,.-q_atomic_set_int
+
+ .align 4
+ .type q_atomic_set_ptr,#function
+ .global q_atomic_set_ptr
+q_atomic_set_ptr:
+q_atomic_set_ptr_retry:
+ ldx [%o0],%o2
+ casx [%o0],%o2,%o1
+ cmp %o2,%o1
+ bne q_atomic_set_ptr_retry
+ nop
+ retl
+ mov %o1,%o0
+ .size q_atomic_set_ptr,.-q_atomic_set_ptr
+
+ .align 4
+ .type q_atomic_fetch_and_add_int,#function
+ .global q_atomic_fetch_and_add_int
+q_atomic_fetch_and_add_int:
+q_atomic_fetch_and_add_int_retry:
+ ld [%o0],%o3
+ add %o3,%o1,%o4
+ cas [%o0],%o3,%o4
+ cmp %o3,%o4
+ bne q_atomic_fetch_and_add_int_retry
+ nop
+ retl
+ mov %o3,%o0
+ .size q_atomic_fetch_and_add_int,.-q_atomic_fetch_and_add_int
+
+ .align 4
+ .type q_atomic_fetch_and_add_acquire_int,#function
+ .global q_atomic_fetch_and_add_acquire_int
+q_atomic_fetch_and_add_acquire_int:
+q_atomic_fetch_and_add_acquire_int_retry:
+ ld [%o0],%o3
+ add %o3,%o1,%o4
+ cas [%o0],%o3,%o4
+ cmp %o3,%o4
+ bne q_atomic_fetch_and_add_acquire_int_retry
+ nop
+ membar #LoadLoad | #LoadStore
+ retl
+ mov %o3,%o0
+ .size q_atomic_fetch_and_add_acquire_int,.-q_atomic_fetch_and_add_acquire_int
+
+ .align 4
+ .type q_atomic_fetch_and_add_release_int,#function
+ .global q_atomic_fetch_and_add_release_int
+q_atomic_fetch_and_add_release_int:
+q_atomic_fetch_and_add_release_int_retry:
+ membar #LoadStore | #StoreStore
+ ld [%o0],%o3
+ add %o3,%o1,%o4
+ cas [%o0],%o3,%o4
+ cmp %o3,%o4
+ bne q_atomic_fetch_and_add_release_int_retry
+ nop
+ retl
+ mov %o3,%o0
+ .size q_atomic_fetch_and_add_release_int,.-q_atomic_fetch_and_add_release_int
+
+ .align 4
+ .type q_atomic_fetch_and_store_acquire_int,#function
+ .global q_atomic_fetch_and_store_acquire_int
+q_atomic_fetch_and_store_acquire_int:
+q_atomic_fetch_and_store_acquire_int_retry:
+ ld [%o0],%o2
+ cas [%o0],%o2,%o1
+ cmp %o2,%o1
+ bne q_atomic_fetch_and_store_acquire_int_retry
+ nop
+ membar #LoadLoad | #LoadStore
+ retl
+ mov %o1,%o0
+ .size q_atomic_fetch_and_store_acquire_int,.-q_atomic_fetch_and_store_acquire_int
+
+ .align 4
+ .type q_atomic_fetch_and_store_release_int,#function
+ .global q_atomic_fetch_and_store_release_int
+q_atomic_fetch_and_store_release_int:
+q_atomic_fetch_and_store_release_int_retry:
+ membar #LoadStore | #StoreStore
+ ld [%o0],%o2
+ cas [%o0],%o2,%o1
+ cmp %o2,%o1
+ bne q_atomic_fetch_and_store_release_int_retry
+ nop
+ retl
+ mov %o1,%o0
+ .size q_atomic_fetch_and_store_release_int,.-q_atomic_fetch_and_store_release_int
+
+ .align 4
+ .type q_atomic_test_and_set_acquire_ptr,#function
+ .global q_atomic_test_and_set_acquire_ptr
+q_atomic_test_and_set_acquire_ptr:
+ casx [%o0],%o1,%o2
+ cmp %o1,%o2
+ clr %o0
+ membar #LoadLoad | #LoadStore
+ retl
+ move %icc,1,%o0
+ .size q_atomic_test_and_set_acquire_ptr,.-q_atomic_test_and_set_acquire_ptr
+
+ .align 4
+ .type q_atomic_test_and_set_release_ptr,#function
+ .global q_atomic_test_and_set_release_ptr
+q_atomic_test_and_set_release_ptr:
+ membar #LoadStore | #StoreStore
+ casx [%o0],%o1,%o2
+ cmp %o1,%o2
+ clr %o0
+ retl
+ move %icc,1,%o0
+ .size q_atomic_test_and_set_release_ptr,.-q_atomic_test_and_set_release_ptr
+
+ .align 4
+ .type q_atomic_fetch_and_store_acquire_ptr,#function
+ .global q_atomic_fetch_and_store_acquire_ptr
+q_atomic_fetch_and_store_acquire_ptr:
+q_atomic_fetch_and_store_acquire_ptr_retry:
+ ldx [%o0],%o2
+ casx [%o0],%o2,%o1
+ cmp %o2,%o1
+ bne q_atomic_fetch_and_store_acquire_ptr_retry
+ nop
+ membar #LoadLoad | #LoadStore
+ retl
+ mov %o1,%o0
+ .size q_atomic_fetch_and_store_acquire_ptr,.-q_atomic_fetch_and_store_acquire_ptr
+
+ .align 4
+ .type q_atomic_fetch_and_store_release_ptr,#function
+ .global q_atomic_fetch_and_store_release_ptr
+q_atomic_fetch_and_store_release_ptr:
+q_atomic_fetch_and_store_release_ptr_retry:
+ membar #LoadStore | #StoreStore
+ ldx [%o0],%o2
+ casx [%o0],%o2,%o1
+ cmp %o2,%o1
+ bne q_atomic_fetch_and_store_release_ptr_retry
+ nop
+ retl
+ mov %o1,%o0
+ .size q_atomic_fetch_and_store_release_ptr,.-q_atomic_fetch_and_store_release_ptr
+
+ .align 4
+ .type q_atomic_fetch_and_add_ptr,#function
+ .global q_atomic_fetch_and_add_ptr
+q_atomic_fetch_and_add_ptr:
+q_atomic_fetch_and_add_ptr_retry:
+ ldx [%o0],%o3
+ add %o3,%o1,%o4
+ casx [%o0],%o3,%o4
+ cmp %o3,%o4
+ bne q_atomic_fetch_and_add_ptr_retry
+ nop
+ retl
+ mov %o3,%o0
+ .size q_atomic_fetch_and_add_ptr,.-q_atomic_fetch_and_add_ptr
+
+ .align 4
+ .type q_atomic_fetch_and_add_acquire_ptr,#function
+ .global q_atomic_fetch_and_add_acquire_ptr
+q_atomic_fetch_and_add_acquire_ptr:
+q_atomic_fetch_and_add_acquire_ptr_retry:
+ ldx [%o0],%o3
+ add %o3,%o1,%o4
+ casx [%o0],%o3,%o4
+ cmp %o3,%o4
+ bne q_atomic_fetch_and_add_acquire_ptr_retry
+ nop
+ membar #LoadLoad | #LoadStore
+ retl
+ mov %o3,%o0
+ .size q_atomic_fetch_and_add_acquire_ptr,.-q_atomic_fetch_and_add_acquire_ptr
+
+ .align 4
+ .type q_atomic_fetch_and_add_release_ptr,#function
+ .global q_atomic_fetch_and_add_release_ptr
+q_atomic_fetch_and_add_release_ptr:
+q_atomic_fetch_and_add_release_ptr_retry:
+ membar #LoadStore | #StoreStore
+ ldx [%o0],%o3
+ add %o3,%o1,%o4
+ casx [%o0],%o3,%o4
+ cmp %o3,%o4
+ bne q_atomic_fetch_and_add_release_ptr_retry
+ nop
+ retl
+ mov %o3,%o0
+ .size q_atomic_fetch_and_add_release_ptr,.-q_atomic_fetch_and_add_release_ptr
diff --git a/src/corelib/arch/sparc/qatomic_sparc.cpp b/src/corelib/arch/sparc/qatomic_sparc.cpp
new file mode 100644
index 0000000000..ec398e03b6
--- /dev/null
+++ b/src/corelib/arch/sparc/qatomic_sparc.cpp
@@ -0,0 +1,92 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include <QtCore/qatomic.h>
+
+#include <limits.h>
+#include <sched.h>
+
+extern "C" {
+
+int q_atomic_trylock_int(volatile int *addr);
+int q_atomic_trylock_ptr(volatile void *addr);
+
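+// q_atomic_trylock_int swaps INT_MIN into *addr, so INT_MIN marks a held lock.
+// The lock routines spin (yielding to the scheduler) until the previous value
+// is something else.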
+Q_CORE_EXPORT int q_atomic_lock_int(volatile int *addr)
+{
+ int returnValue = q_atomic_trylock_int(addr);
+
+ if (returnValue == INT_MIN) {
+ do {
+ // spin until we think we can succeed
+ do {
+ sched_yield();
+ returnValue = *addr;
+ } while (returnValue == INT_MIN);
+
+ // try again
+ returnValue = q_atomic_trylock_int(addr);
+ } while (returnValue == INT_MIN);
+ }
+
+ return returnValue;
+}
+
+Q_CORE_EXPORT int q_atomic_lock_ptr(volatile void *addr)
+{
+ int returnValue = q_atomic_trylock_ptr(addr);
+
+ if (returnValue == -1) {
+ do {
+ // spin until we think we can succeed
+ do {
+ sched_yield();
+ returnValue = *reinterpret_cast<volatile int *>(addr);
+ } while (returnValue == -1);
+
+ // try again
+ returnValue = q_atomic_trylock_ptr(addr);
+ } while (returnValue == -1);
+ }
+
+ return returnValue;
+}
+
+} // extern "C"
diff --git a/src/corelib/arch/symbian/arch.pri b/src/corelib/arch/symbian/arch.pri
new file mode 100644
index 0000000000..2e2a9b6aed
--- /dev/null
+++ b/src/corelib/arch/symbian/arch.pri
@@ -0,0 +1,18 @@
+#
+# Symbian architecture
+#
+SOURCES += $$QT_ARCH_CPP/qatomic_symbian.cpp \
+ $$QT_ARCH_CPP/qatomic_generic_armv6.cpp \
+ $$QT_ARCH_CPP/heap_hybrid.cpp \
+ $$QT_ARCH_CPP/debugfunction.cpp \
+ $$QT_ARCH_CPP/qt_heapsetup_symbian.cpp
+
+HEADERS += $$QT_ARCH_CPP/dla_p.h \
+ $$QT_ARCH_CPP/heap_hybrid_p.h \
+ $$QT_ARCH_CPP/common_p.h \
+ $$QT_ARCH_CPP/page_alloc_p.h \
+ $$QT_ARCH_CPP/slab_p.h \
+ $$QT_ARCH_CPP/qt_hybridHeap_symbian_p.h
+
+exists($${EPOCROOT}epoc32/include/platform/u32std.h):DEFINES += QT_SYMBIAN_HAVE_U32STD_H
+exists($${EPOCROOT}epoc32/include/platform/e32btrace.h):DEFINES += QT_SYMBIAN_HAVE_E32BTRACE_H
diff --git a/src/corelib/arch/symbian/common_p.h b/src/corelib/arch/symbian/common_p.h
new file mode 100644
index 0000000000..852646c250
--- /dev/null
+++ b/src/corelib/arch/symbian/common_p.h
@@ -0,0 +1,106 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef __E32_COMMON_H__
+#define __E32_COMMON_H__
+
+#ifdef __KERNEL_MODE__
+#include <e32cmn.h>
+#include <e32panic.h>
+#include "u32std.h"
+#else
+#include <e32std.h>
+#include <e32base.h>
+#include <e32math.h>
+#include <e32svr.h>
+#include <e32ver.h>
+#include <e32hal.h>
+#include <e32panic.h>
+// the Symbian^3 SDK used for this backport of the Symbian^4 allocator does not provide u32exec.h
+//#include <u32exec.h>
+#endif
+
+GLREF_C void Panic(TCdtPanic aPanic);
+GLDEF_C void PanicBadArrayIndex();
+GLREF_C TInt __DoConvertNum(TUint, TRadix, TUint, TUint8*&);
+GLREF_C TInt __DoConvertNum(Uint64, TRadix, TUint, TUint8*&);
+
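+// The helper macros below bind to kernel-side services (Kern, RHeapK, M) when building
+// for kernel mode, and to user-side services (User, RDebug, UserHal) otherwise.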
+#ifdef __KERNEL_MODE__
+GLREF_C void KernHeapFault(TCdtPanic aPanic);
+GLREF_C void KHeapCheckThreadState();
+TInt StringLength(const TUint16* aPtr);
+TInt StringLength(const TUint8* aPtr);
+
+#define STD_CLASS Kern
+#define STRING_LENGTH(s) StringLength(s)
+#define STRING_LENGTH_16(s) StringLength(s)
+#define PANIC_CURRENT_THREAD(c,r) Kern::PanicCurrentThread(c, r)
+#define __KERNEL_CHECK_RADIX(r) __ASSERT_ALWAYS(((r)==EDecimal)||((r)==EHex),Panic(EInvalidRadix))
+#define APPEND_BUF_SIZE 10
+#define APPEND_BUF_SIZE_64 20
+#define HEAP_PANIC(r) Kern::Printf("HEAP CORRUPTED %s %d", __FILE__, __LINE__), RHeapK::Fault(r)
+#define GET_PAGE_SIZE(x) x = M::PageSizeInBytes()
+#define DIVISION_BY_ZERO() FAULT()
+
+#ifdef _DEBUG
+#define __CHECK_THREAD_STATE RHeapK::CheckThreadState()
+#else
+#define __CHECK_THREAD_STATE
+#endif
+
+#else
+
+#define STD_CLASS User
+#define STRING_LENGTH(s) User::StringLength(s)
+#define STRING_LENGTH_16(s) User::StringLength(s)
+#define PANIC_CURRENT_THREAD(c,r) User::Panic(c, r)
+#define MEM_COMPARE_16 Mem::Compare
+#define __KERNEL_CHECK_RADIX(r)
+#define APPEND_BUF_SIZE 32
+#define APPEND_BUF_SIZE_64 64
+#define HEAP_PANIC(r) RDebug::Printf("HEAP CORRUPTED %s %d", __FILE__, __LINE__), Panic(r)
+#define GET_PAGE_SIZE(x) UserHal::PageSizeInBytes(x)
+#define DIVISION_BY_ZERO() User::RaiseException(EExcIntegerDivideByZero)
+#define __CHECK_THREAD_STATE
+
+#endif // __KERNEL_MODE__
+
+#endif
diff --git a/src/corelib/arch/symbian/debugfunction.cpp b/src/corelib/arch/symbian/debugfunction.cpp
new file mode 100644
index 0000000000..445f1063a8
--- /dev/null
+++ b/src/corelib/arch/symbian/debugfunction.cpp
@@ -0,0 +1,1143 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include "qt_hybridheap_symbian_p.h"
+
+#ifdef QT_USE_NEW_SYMBIAN_ALLOCATOR
+
+#define GM (&iGlobalMallocState)
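+// When a corruption check fails, the heap is panicked; if memory monitoring or allocation
+// tracing is enabled, a BTrace corruption event identifying the offending cell is emitted first.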
+#define __HEAP_CORRUPTED_TRACE(t,p,l) BTraceContext12(BTrace::EHeap, BTrace::EHeapCorruption, (TUint32)t, (TUint32)p, (TUint32)l);
+#define __HEAP_CORRUPTED_TEST(c,x, p,l) if (!c) { if (iFlags & (EMonitorMemory+ETraceAllocs) ) __HEAP_CORRUPTED_TRACE(this,p,l) HEAP_PANIC(x); }
+#define __HEAP_CORRUPTED_TEST_STATIC(c,t,x,p,l) if (!c) { if (t && (t->iFlags & (EMonitorMemory+ETraceAllocs) )) __HEAP_CORRUPTED_TRACE(t,p,l) HEAP_PANIC(x); }
+
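+// Dispatcher for the RAllocator/RHybridHeap debug API: heap statistics, mark/check
+// operations, simulated allocation failure, heap walking and allocator configuration.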
+TInt RHybridHeap::DebugFunction(TInt aFunc, TAny* a1, TAny* a2)
+{
+ TInt r = KErrNone;
+ switch(aFunc)
+ {
+
+ case RAllocator::ECount:
+ struct HeapInfo info;
+ Lock();
+ GetInfo(&info, NULL);
+ *(unsigned*)a1 = info.iFreeN;
+ r = info.iAllocN;
+ Unlock();
+ break;
+
+ case RAllocator::EMarkStart:
+ __DEBUG_ONLY(DoMarkStart());
+ break;
+
+ case RAllocator::EMarkEnd:
+ __DEBUG_ONLY( r = DoMarkEnd((TInt)a1) );
+ break;
+
+ case RAllocator::ECheck:
+ r = DoCheckHeap((SCheckInfo*)a1);
+ break;
+
+ case RAllocator::ESetFail:
+ __DEBUG_ONLY(DoSetAllocFail((TAllocFail)(TInt)a1, (TInt)a2));
+ break;
+
+ case RHybridHeap::EGetFail:
+ __DEBUG_ONLY(r = iFailType);
+ break;
+
+ case RHybridHeap::ESetBurstFail:
+#if _DEBUG
+ {
+ SRAllocatorBurstFail* fail = (SRAllocatorBurstFail*) a2;
+ DoSetAllocFail((TAllocFail)(TInt)a1, fail->iRate, fail->iBurst);
+ }
+#endif
+ break;
+
+ case RHybridHeap::ECheckFailure:
+ // iRand will be incremented for each EFailNext, EBurstFailNext,
+ // EDeterministic and EBurstDeterministic failure.
+ r = iRand;
+ break;
+
+ case RAllocator::ECopyDebugInfo:
+ {
+ TInt nestingLevel = ((SDebugCell*)a1)[-1].nestingLevel;
+ ((SDebugCell*)a2)[-1].nestingLevel = nestingLevel;
+ break;
+ }
+
+ case RHybridHeap::EGetSize:
+ {
+ r = iChunkSize - sizeof(RHybridHeap);
+ break;
+ }
+
+ case RHybridHeap::EGetMaxLength:
+ {
+ r = iMaxLength;
+ break;
+ }
+
+ case RHybridHeap::EGetBase:
+ {
+ *(TAny**)a1 = iBase;
+ break;
+ }
+
+ case RHybridHeap::EAlignInteger:
+ {
+ r = _ALIGN_UP((TInt)a1, iAlign);
+ break;
+ }
+
+ case RHybridHeap::EAlignAddr:
+ {
+ *(TAny**)a2 = (TAny*)_ALIGN_UP((TLinAddr)a1, iAlign);
+ break;
+ }
+
+ case RHybridHeap::EWalk:
+ struct HeapInfo hinfo;
+ SWalkInfo winfo;
+ Lock();
+ winfo.iFunction = (TWalkFunc)a1;
+ winfo.iParam = a2;
+ winfo.iHeap = (RHybridHeap*)this;
+ GetInfo(&hinfo, &winfo);
+ Unlock();
+ break;
+
+#ifndef __KERNEL_MODE__
+
+ case RHybridHeap::EHybridHeap:
+ {
+ if ( !a1 )
+ return KErrGeneral;
+ STestCommand* cmd = (STestCommand*)a1;
+ switch ( cmd->iCommand )
+ {
+ case EGetConfig:
+ cmd->iConfig.iSlabBits = iSlabConfigBits;
+          cmd->iConfig.iDelayedSlabThreshold = iSlabInitThreshold;
+ cmd->iConfig.iPagePower = iPageThreshold;
+ break;
+
+ case ESetConfig:
+ //
+ // New configuration data for slab and page allocator.
+ // Reset heap to get data into use
+ //
+#if USE_HYBRID_HEAP
+ iSlabConfigBits = cmd->iConfig.iSlabBits & 0x3fff;
+ iSlabInitThreshold = cmd->iConfig.iDelayedSlabThreshold;
+ iPageThreshold = (cmd->iConfig.iPagePower & 0x1f);
+ Reset();
+#endif
+ break;
+
+ case EHeapMetaData:
+ cmd->iData = this;
+ break;
+
+ case ETestData:
+ iTestData = cmd->iData;
+ break;
+
+ default:
+ return KErrNotSupported;
+
+ }
+
+ break;
+ }
+#endif // __KERNEL_MODE
+
+ default:
+ return KErrNotSupported;
+
+ }
+ return r;
+}
+
+void RHybridHeap::Walk(SWalkInfo* aInfo, TAny* aBfr, TInt aLth, TCellType aBfrType, TAllocatorType aAllocatorType)
+{
+ //
+ // This function is always called from RHybridHeap::GetInfo.
+   // The actual walk callback is only invoked if an SWalkInfo pointer is supplied
+ //
+ //
+ if ( aInfo )
+ {
+#ifdef __KERNEL_MODE__
+ (void)aAllocatorType;
+#if defined(_DEBUG)
+ if ( aBfrType == EGoodAllocatedCell )
+ aInfo->iFunction(aInfo->iParam, aBfrType, ((TUint8*)aBfr+EDebugHdrSize), (aLth-EDebugHdrSize) );
+ else
+ aInfo->iFunction(aInfo->iParam, aBfrType, aBfr, aLth );
+#else
+ aInfo->iFunction(aInfo->iParam, aBfrType, aBfr, aLth );
+#endif
+
+#else // __KERNEL_MODE__
+
+ if ( aAllocatorType & (EFullSlab + EPartialFullSlab + EEmptySlab + ESlabSpare) )
+ {
+ if ( aInfo->iHeap )
+ {
+ TUint32 dummy;
+ TInt npages;
+ aInfo->iHeap->DoCheckSlab((slab*)aBfr, aAllocatorType);
+ __HEAP_CORRUPTED_TEST_STATIC(aInfo->iHeap->CheckBitmap(Floor(aBfr, PAGESIZE), PAGESIZE, dummy, npages),
+ aInfo->iHeap, ETHeapBadCellAddress, aBfr, aLth);
+ }
+ if ( aAllocatorType & EPartialFullSlab )
+ WalkPartialFullSlab(aInfo, (slab*)aBfr, aBfrType, aLth);
+ else if ( aAllocatorType & EFullSlab )
+ WalkFullSlab(aInfo, (slab*)aBfr, aBfrType, aLth);
+ }
+#if defined(_DEBUG)
+ else if ( aBfrType == EGoodAllocatedCell )
+ aInfo->iFunction(aInfo->iParam, aBfrType, ((TUint8*)aBfr+EDebugHdrSize), (aLth-EDebugHdrSize) );
+ else
+ aInfo->iFunction(aInfo->iParam, aBfrType, aBfr, aLth );
+#else
+ else
+ aInfo->iFunction(aInfo->iParam, aBfrType, aBfr, aLth );
+#endif
+
+#endif // __KERNEL_MODE
+ }
+}
+
+#ifndef __KERNEL_MODE__
+void RHybridHeap::WalkPartialFullSlab(SWalkInfo* aInfo, slab* aSlab, TCellType /*aBfrType*/, TInt /*aLth*/)
+{
+ if ( aInfo )
+ {
+ //
+ // Build bitmap of free buffers in the partial full slab
+ //
+ TUint32 bitmap[4];
+ __HEAP_CORRUPTED_TEST_STATIC( (aInfo->iHeap != NULL), aInfo->iHeap, ETHeapBadCellAddress, 0, aSlab);
+ aInfo->iHeap->BuildPartialSlabBitmap(bitmap, aSlab);
+ //
+      // Find the used (allocated) buffers in the partial full slab
+ //
+ TUint32 h = aSlab->iHeader;
+ TUint32 size = SlabHeaderSize(h);
+ TUint32 count = KMaxSlabPayload / size; // Total buffer count in slab
+ TUint32 i = 0;
+ TUint32 ix = 0;
+ TUint32 bit = 1;
+
+ while ( i < count )
+ {
+
+ if ( bitmap[ix] & bit )
+ {
+ aInfo->iFunction(aInfo->iParam, EGoodFreeCell, &aSlab->iPayload[i*size], size );
+ }
+ else
+ {
+#if defined(_DEBUG)
+ aInfo->iFunction(aInfo->iParam, EGoodAllocatedCell, (&aSlab->iPayload[i*size]+EDebugHdrSize), (size-EDebugHdrSize) );
+#else
+ aInfo->iFunction(aInfo->iParam, EGoodAllocatedCell, &aSlab->iPayload[i*size], size );
+#endif
+ }
+ bit <<= 1;
+ if ( bit == 0 )
+ {
+ bit = 1;
+ ix ++;
+ }
+
+ i ++;
+ }
+ }
+
+}
+
+void RHybridHeap::WalkFullSlab(SWalkInfo* aInfo, slab* aSlab, TCellType aBfrType, TInt /*aLth*/)
+{
+ if ( aInfo )
+ {
+ TUint32 h = aSlab->iHeader;
+ TUint32 size = SlabHeaderSize(h);
+ TUint32 count = (SlabHeaderUsedm4(h) + 4) / size;
+ TUint32 i = 0;
+ while ( i < count )
+ {
+#if defined(_DEBUG)
+ if ( aBfrType == EGoodAllocatedCell )
+ aInfo->iFunction(aInfo->iParam, aBfrType, (&aSlab->iPayload[i*size]+EDebugHdrSize), (size-EDebugHdrSize) );
+ else
+ aInfo->iFunction(aInfo->iParam, aBfrType, &aSlab->iPayload[i*size], size );
+#else
+ aInfo->iFunction(aInfo->iParam, aBfrType, &aSlab->iPayload[i*size], size );
+#endif
+ i ++;
+ }
+ }
+}
+
+void RHybridHeap::BuildPartialSlabBitmap(TUint32* aBitmap, slab* aSlab, TAny* aBfr)
+{
+ //
+ // Build a bitmap of free buffers in a partial full slab
+ //
+ TInt i;
+ TUint32 bit = 0;
+ TUint32 index;
+ TUint32 h = aSlab->iHeader;
+ TUint32 used = SlabHeaderUsedm4(h)+4;
+ TUint32 size = SlabHeaderSize(h);
+ TInt count = (KMaxSlabPayload / size);
+ TInt free_count = count - (used / size); // Total free buffer count in slab
+ aBitmap[0] = 0, aBitmap[1] = 0, aBitmap[2] = 0, aBitmap[3] = 0;
+ TUint32 offs = (h & 0xff) << 2;
+
+   // Walk the partial slab's free buffer chain, marking each free buffer in the bitmap
+ // Process first buffer in partial slab free buffer chain
+ //
+ while ( offs )
+ {
+ unsigned char* p = (unsigned char*)Offset(aSlab, offs);
+ __HEAP_CORRUPTED_TEST( (sizeof(slabhdr) <= offs), ETHeapBadCellAddress, p, aSlab);
+ offs -= sizeof(slabhdr);
+ __HEAP_CORRUPTED_TEST( (offs % size == 0), ETHeapBadCellAddress, p, aSlab);
+ index = (offs / size); // Bit index in bitmap
+ i = 0;
+ while ( i < 4 )
+ {
+ if ( index < 32 )
+ {
+ bit = (1 << index);
+ break;
+ }
+ index -= 32;
+ i ++;
+ }
+
+ __HEAP_CORRUPTED_TEST( ((aBitmap[i] & bit) == 0), ETHeapBadCellAddress, p, aSlab); // Buffer already in chain
+
+ aBitmap[i] |= bit;
+ free_count --;
+ offs = ((unsigned)*p) << 2; // Next in free chain
+ }
+
+ __HEAP_CORRUPTED_TEST( (free_count >= 0), ETHeapBadCellAddress, aBfr, aSlab); // free buffer count/size mismatch
+ //
+   // Process the rest of the free buffers, which are in the
+   // wilderness (at the end of the slab)
+ //
+ index = count - 1;
+ i = index / 32;
+ index = index % 32;
+ while ( free_count && (i >= 0))
+ {
+ bit = (1 << index);
+ __HEAP_CORRUPTED_TEST( ((aBitmap[i] & bit) == 0), ETHeapBadCellAddress, aBfr, aSlab); // Buffer already in chain
+ aBitmap[i] |= bit;
+ if ( index )
+ index --;
+ else
+ {
+ index = 31;
+ i --;
+ }
+ free_count --;
+ }
+
+ if ( aBfr ) // Assure that specified buffer does NOT exist in partial slab free buffer chain
+ {
+ offs = LowBits(aBfr, SLABSIZE);
+ __HEAP_CORRUPTED_TEST( (sizeof(slabhdr) <= offs), ETHeapBadCellAddress, aBfr, aSlab);
+ offs -= sizeof(slabhdr);
+ __HEAP_CORRUPTED_TEST( ((offs % size) == 0), ETHeapBadCellAddress, aBfr, aSlab);
+ index = (offs / size); // Bit index in bitmap
+ i = 0;
+ while ( i < 4 )
+ {
+ if ( index < 32 )
+ {
+ bit = (1 << index);
+ break;
+ }
+ index -= 32;
+ i ++;
+ }
+ __HEAP_CORRUPTED_TEST( ((aBitmap[i] & bit) == 0), ETHeapBadCellAddress, aBfr, aSlab); // Buffer already in chain
+ }
+}
+
+#endif // __KERNEL_MODE__
+
+void RHybridHeap::WalkCheckCell(TAny* aPtr, TCellType aType, TAny* aCell, TInt aLen)
+{
+ (void)aCell;
+ SHeapCellInfo& info = *(SHeapCellInfo*)aPtr;
+ switch(aType)
+ {
+ case EGoodAllocatedCell:
+ {
+ ++info.iTotalAlloc;
+ info.iTotalAllocSize += aLen;
+#if defined(_DEBUG)
+ RHybridHeap& h = *info.iHeap;
+ SDebugCell* DbgCell = (SDebugCell*)((TUint8*)aCell-EDebugHdrSize);
+ if ( DbgCell->nestingLevel == h.iNestingLevel )
+ {
+ if (++info.iLevelAlloc==1)
+ info.iStranded = DbgCell;
+#ifdef __KERNEL_MODE__
+ if (KDebugNum(KSERVER) || KDebugNum(KTESTFAST))
+ {
+ Kern::Printf("LEAKED KERNEL HEAP CELL @ %08x : len=%d", aCell, aLen);
+ TLinAddr base = ((TLinAddr)aCell)&~0x0f;
+ TLinAddr end = ((TLinAddr)aCell)+(TLinAddr)aLen;
+ while(base<end)
+ {
+ const TUint32* p = (const TUint32*)base;
+ Kern::Printf("%08x: %08x %08x %08x %08x", p, p[0], p[1], p[2], p[3]);
+ base += 16;
+ }
+ }
+#endif
+ }
+#endif
+ break;
+ }
+ case EGoodFreeCell:
+ ++info.iTotalFree;
+ break;
+ case EBadAllocatedCellSize:
+ HEAP_PANIC(ETHeapBadAllocatedCellSize);
+ case EBadAllocatedCellAddress:
+ HEAP_PANIC(ETHeapBadAllocatedCellAddress);
+ case EBadFreeCellAddress:
+ HEAP_PANIC(ETHeapBadFreeCellAddress);
+ case EBadFreeCellSize:
+ HEAP_PANIC(ETHeapBadFreeCellSize);
+ default:
+ HEAP_PANIC(ETHeapWalkBadCellType);
+ }
+}
+
+
+TInt RHybridHeap::DoCheckHeap(SCheckInfo* aInfo)
+{
+ (void)aInfo;
+ SHeapCellInfo info;
+ memclr(&info, sizeof(info));
+ info.iHeap = this;
+ struct HeapInfo hinfo;
+ SWalkInfo winfo;
+ Lock();
+ DoCheckMallocState(GM); // Check DL heap internal structure
+#ifndef __KERNEL_MODE__
+ TUint32 dummy;
+ TInt npages;
+ __HEAP_CORRUPTED_TEST(CheckBitmap(NULL, 0, dummy, npages), ETHeapBadCellAddress, this, 0); // Check page allocator buffers
+ DoCheckSlabTrees();
+ DoCheckCommittedSize(npages, GM);
+#endif
+ winfo.iFunction = WalkCheckCell;
+ winfo.iParam = &info;
+ winfo.iHeap = (RHybridHeap*)this;
+ GetInfo(&hinfo, &winfo);
+ Unlock();
+
+#if defined(_DEBUG)
+ if (!aInfo)
+ return KErrNone;
+ TInt expected = aInfo->iCount;
+ TInt actual = aInfo->iAll ? info.iTotalAlloc : info.iLevelAlloc;
+ if (actual!=expected && !iTestData)
+ {
+#ifdef __KERNEL_MODE__
+ Kern::Fault("KERN-ALLOC COUNT", (expected<<16)|actual );
+#else
+ User::Panic(_L("ALLOC COUNT"), (expected<<16)|actual );
+#endif
+ }
+#endif
+ return KErrNone;
+}
+
+#ifdef _DEBUG
+void RHybridHeap::DoMarkStart()
+{
+ if (iNestingLevel==0)
+ iAllocCount=0;
+ iNestingLevel++;
+}
+
+TUint32 RHybridHeap::DoMarkEnd(TInt aExpected)
+{
+ if (iNestingLevel==0)
+ return 0;
+ SHeapCellInfo info;
+ SHeapCellInfo* p = iTestData ? (SHeapCellInfo*)iTestData : &info;
+ memclr(p, sizeof(info));
+ p->iHeap = this;
+ struct HeapInfo hinfo;
+ SWalkInfo winfo;
+ Lock();
+ winfo.iFunction = WalkCheckCell;
+ winfo.iParam = p;
+ winfo.iHeap = (RHybridHeap*)this;
+ GetInfo(&hinfo, &winfo);
+ Unlock();
+
+ if (p->iLevelAlloc != aExpected && !iTestData)
+ return (TUint32)(p->iStranded + 1);
+ if (--iNestingLevel == 0)
+ iAllocCount = 0;
+ return 0;
+}
+
+void RHybridHeap::DoSetAllocFail(TAllocFail aType, TInt aRate)
+{// Default to a burst mode of 1, as aType may be a burst type.
+ DoSetAllocFail(aType, aRate, 1);
+}
+
+void ResetAllocCellLevels(TAny* aPtr, RHybridHeap::TCellType aType, TAny* aCell, TInt aLen)
+{
+ (void)aPtr;
+ (void)aLen;
+
+ if (aType == RHybridHeap::EGoodAllocatedCell)
+ {
+ RHybridHeap::SDebugCell* DbgCell = (RHybridHeap::SDebugCell*)((TUint8*)aCell-RHybridHeap::EDebugHdrSize);
+ DbgCell->nestingLevel = 0;
+ }
+}
+
+// Don't change as the ETHeapBadDebugFailParameter check below and the API
+// documentation rely on this being 16 for RHybridHeap.
+LOCAL_D const TInt KBurstFailRateShift = 16;
+LOCAL_D const TInt KBurstFailRateMask = (1 << KBurstFailRateShift) - 1;
+
+void RHybridHeap::DoSetAllocFail(TAllocFail aType, TInt aRate, TUint aBurst)
+{
+ if (aType==EReset)
+ {
+ // reset levels of all allocated cells to 0
+ // this should prevent subsequent tests failing unnecessarily
+ iFailed = EFalse; // Reset for ECheckFailure relies on this.
+ struct HeapInfo hinfo;
+ SWalkInfo winfo;
+ Lock();
+ winfo.iFunction = (TWalkFunc)&ResetAllocCellLevels;
+ winfo.iParam = NULL;
+ winfo.iHeap = (RHybridHeap*)this;
+ GetInfo(&hinfo, &winfo);
+ Unlock();
+ // reset heap allocation mark as well
+ iNestingLevel=0;
+ iAllocCount=0;
+ aType=ENone;
+ }
+
+ switch (aType)
+ {
+ case EBurstRandom:
+ case EBurstTrueRandom:
+ case EBurstDeterministic:
+ case EBurstFailNext:
+ // If the fail type is a burst type then iFailRate is split in 2:
+ // the 16 lsbs are the fail rate and the 16 msbs are the burst length.
+ if (TUint(aRate) > (TUint)KMaxTUint16 || aBurst > KMaxTUint16)
+ HEAP_PANIC(ETHeapBadDebugFailParameter);
+
+ iFailed = EFalse;
+ iFailType = aType;
+ iFailRate = (aRate == 0) ? 1 : aRate;
+ iFailAllocCount = -iFailRate;
+ iFailRate = iFailRate | (aBurst << KBurstFailRateShift);
+ break;
+
+ default:
+ iFailed = EFalse;
+ iFailType = aType;
+ iFailRate = (aRate == 0) ? 1 : aRate; // A rate of <1 is meaningless
+ iFailAllocCount = 0;
+ break;
+ }
+
+ // Set up iRand for either:
+ // - random seed value, or
+ // - a count of the number of failures so far.
+ iRand = 0;
+#ifndef __KERNEL_MODE__
+ switch (iFailType)
+ {
+ case ETrueRandom:
+ case EBurstTrueRandom:
+ {
+ TTime time;
+ time.HomeTime();
+ TInt64 seed = time.Int64();
+ iRand = Math::Rand(seed);
+ break;
+ }
+ case ERandom:
+ case EBurstRandom:
+ {
+ TInt64 seed = 12345;
+ iRand = Math::Rand(seed);
+ break;
+ }
+ default:
+ break;
+ }
+#endif
+}
+
+TBool RHybridHeap::CheckForSimulatedAllocFail()
+//
+// Check whether the user has requested simulated alloc failure and, if so,
+// possibly return ETrue to indicate a failure.
+//
+{
+ // For burst mode failures iFailRate is shared
+ TUint16 rate = (TUint16)(iFailRate & KBurstFailRateMask);
+ TUint16 burst = (TUint16)(iFailRate >> KBurstFailRateShift);
+ TBool r = EFalse;
+ switch (iFailType)
+ {
+#ifndef __KERNEL_MODE__
+ case ERandom:
+ case ETrueRandom:
+ if (++iFailAllocCount>=iFailRate)
+ {
+ iFailAllocCount=0;
+ if (!iFailed) // haven't failed yet after iFailRate allocations so fail now
+ return(ETrue);
+ iFailed=EFalse;
+ }
+ else
+ {
+ if (!iFailed)
+ {
+ TInt64 seed=iRand;
+ iRand=Math::Rand(seed);
+ if (iRand%iFailRate==0)
+ {
+ iFailed=ETrue;
+ return(ETrue);
+ }
+ }
+ }
+ break;
+
+ case EBurstRandom:
+ case EBurstTrueRandom:
+ if (++iFailAllocCount < 0)
+ {
+ // We haven't started failing yet so should we now?
+ TInt64 seed = iRand;
+ iRand = Math::Rand(seed);
+ if (iRand % rate == 0)
+ {// Fail now. Reset iFailAllocCount so we fail burst times
+ iFailAllocCount = 0;
+ r = ETrue;
+ }
+ }
+ else
+ {
+ if (iFailAllocCount < burst)
+ {// Keep failing for burst times
+ r = ETrue;
+ }
+ else
+ {// We've now failed burst times so start again.
+ iFailAllocCount = -(rate - 1);
+ }
+ }
+ break;
+#endif
+ case EDeterministic:
+ if (++iFailAllocCount%iFailRate==0)
+ {
+ r=ETrue;
+ iRand++; // Keep count of how many times we have failed
+ }
+ break;
+
+ case EBurstDeterministic:
+ // This will fail burst number of times, every rate attempts.
+ if (++iFailAllocCount >= 0)
+ {
+ if (iFailAllocCount == burst - 1)
+				{// This is the last failure of the burst, so reset the counts
+				// so that we next fail after rate attempts.
+ iFailAllocCount = -rate;
+ }
+ r = ETrue;
+ iRand++; // Keep count of how many times we have failed
+ }
+ break;
+
+ case EFailNext:
+ if ((++iFailAllocCount%iFailRate)==0)
+ {
+ iFailType=ENone;
+ r=ETrue;
+ iRand++; // Keep count of how many times we have failed
+ }
+ break;
+
+ case EBurstFailNext:
+ if (++iFailAllocCount >= 0)
+ {
+ if (iFailAllocCount == burst - 1)
+				{// This is the last failure of the burst, so stop failing after this one.
+ iFailType = ENone;
+ }
+ r = ETrue;
+ iRand++; // Keep count of how many times we have failed
+ }
+ break;
+
+ default:
+ break;
+ }
+ return r;
+}
+
+#endif // _DEBUG
+
+//
+// Methods for Doug Lea allocator detailed check
+//
+
+void RHybridHeap::DoCheckAnyChunk(mstate m, mchunkptr p)
+{
+ __HEAP_CORRUPTED_TEST(((IS_ALIGNED(CHUNK2MEM(p))) || (p->iHead == FENCEPOST_HEAD)), ETHeapBadCellAddress, p, 0);
+ (void)m;
+}
+
+/* Check properties of iTop chunk */
+void RHybridHeap::DoCheckTopChunk(mstate m, mchunkptr p)
+{
+ msegmentptr sp = &m->iSeg;
+ size_t sz = CHUNKSIZE(p);
+ __HEAP_CORRUPTED_TEST((sp != 0), ETHeapBadCellAddress, p, 0);
+ __HEAP_CORRUPTED_TEST(((IS_ALIGNED(CHUNK2MEM(p))) || (p->iHead == FENCEPOST_HEAD)), ETHeapBadCellAddress, p,0);
+ __HEAP_CORRUPTED_TEST((sz == m->iTopSize), ETHeapBadCellAddress,p,0);
+ __HEAP_CORRUPTED_TEST((sz > 0), ETHeapBadCellAddress,p,0);
+ __HEAP_CORRUPTED_TEST((sz == ((sp->iBase + sp->iSize) - (TUint8*)p) - TOP_FOOT_SIZE), ETHeapBadCellAddress,p,0);
+ __HEAP_CORRUPTED_TEST((PINUSE(p)), ETHeapBadCellAddress,p,0);
+ __HEAP_CORRUPTED_TEST((!NEXT_PINUSE(p)), ETHeapBadCellAddress,p,0);
+}
+
+/* Check properties of inuse chunks */
+void RHybridHeap::DoCheckInuseChunk(mstate m, mchunkptr p)
+{
+ DoCheckAnyChunk(m, p);
+ __HEAP_CORRUPTED_TEST((CINUSE(p)), ETHeapBadCellAddress,p,0);
+ __HEAP_CORRUPTED_TEST((NEXT_PINUSE(p)), ETHeapBadCellAddress,p,0);
+ /* If not PINUSE and not mmapped, previous chunk has OK offset */
+ __HEAP_CORRUPTED_TEST((PINUSE(p) || NEXT_CHUNK(PREV_CHUNK(p)) == p), ETHeapBadCellAddress,p,0);
+}
+
+/* Check properties of free chunks */
+void RHybridHeap::DoCheckFreeChunk(mstate m, mchunkptr p)
+{
+ size_t sz = p->iHead & ~(PINUSE_BIT|CINUSE_BIT);
+ mchunkptr next = CHUNK_PLUS_OFFSET(p, sz);
+ DoCheckAnyChunk(m, p);
+ __HEAP_CORRUPTED_TEST((!CINUSE(p)), ETHeapBadCellAddress,p,0);
+ __HEAP_CORRUPTED_TEST((!NEXT_PINUSE(p)), ETHeapBadCellAddress,p,0);
+ if (p != m->iDv && p != m->iTop)
+ {
+ if (sz >= MIN_CHUNK_SIZE)
+ {
+ __HEAP_CORRUPTED_TEST(((sz & CHUNK_ALIGN_MASK) == 0), ETHeapBadCellAddress,p,0);
+ __HEAP_CORRUPTED_TEST((IS_ALIGNED(CHUNK2MEM(p))), ETHeapBadCellAddress,p,0);
+ __HEAP_CORRUPTED_TEST((next->iPrevFoot == sz), ETHeapBadCellAddress,p,0);
+ __HEAP_CORRUPTED_TEST((PINUSE(p)), ETHeapBadCellAddress,p,0);
+ __HEAP_CORRUPTED_TEST( (next == m->iTop || CINUSE(next)), ETHeapBadCellAddress,p,0);
+ __HEAP_CORRUPTED_TEST((p->iFd->iBk == p), ETHeapBadCellAddress,p,0);
+ __HEAP_CORRUPTED_TEST((p->iBk->iFd == p), ETHeapBadCellAddress,p,0);
+ }
+ else /* markers are always of size SIZE_T_SIZE */
+ __HEAP_CORRUPTED_TEST((sz == SIZE_T_SIZE), ETHeapBadCellAddress,p,0);
+ }
+}
+
+/* Check properties of malloced chunks at the point they are malloced */
+void RHybridHeap::DoCheckMallocedChunk(mstate m, void* mem, size_t s)
+{
+ if (mem != 0)
+ {
+ mchunkptr p = MEM2CHUNK(mem);
+ size_t sz = p->iHead & ~(PINUSE_BIT|CINUSE_BIT);
+ DoCheckInuseChunk(m, p);
+ __HEAP_CORRUPTED_TEST(((sz & CHUNK_ALIGN_MASK) == 0), ETHeapBadCellAddress,p,0);
+ __HEAP_CORRUPTED_TEST((sz >= MIN_CHUNK_SIZE), ETHeapBadCellAddress,p,0);
+ __HEAP_CORRUPTED_TEST((sz >= s), ETHeapBadCellAddress,p,0);
+ /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
+ __HEAP_CORRUPTED_TEST((sz < (s + MIN_CHUNK_SIZE)), ETHeapBadCellAddress,p,0);
+ }
+}
+
+/* Check a tree and its subtrees. */
+void RHybridHeap::DoCheckTree(mstate m, tchunkptr t)
+{
+ tchunkptr head = 0;
+ tchunkptr u = t;
+ bindex_t tindex = t->iIndex;
+ size_t tsize = CHUNKSIZE(t);
+ bindex_t idx;
+ DoComputeTreeIndex(tsize, idx);
+ __HEAP_CORRUPTED_TEST((tindex == idx), ETHeapBadCellAddress,u,0);
+ __HEAP_CORRUPTED_TEST((tsize >= MIN_LARGE_SIZE), ETHeapBadCellAddress,u,0);
+ __HEAP_CORRUPTED_TEST((tsize >= MINSIZE_FOR_TREE_INDEX(idx)), ETHeapBadCellAddress,u,0);
+ __HEAP_CORRUPTED_TEST(((idx == NTREEBINS-1) || (tsize < MINSIZE_FOR_TREE_INDEX((idx+1)))), ETHeapBadCellAddress,u,0);
+
+ do
+ { /* traverse through chain of same-sized nodes */
+ DoCheckAnyChunk(m, ((mchunkptr)u));
+ __HEAP_CORRUPTED_TEST((u->iIndex == tindex), ETHeapBadCellAddress,u,0);
+ __HEAP_CORRUPTED_TEST((CHUNKSIZE(u) == tsize), ETHeapBadCellAddress,u,0);
+ __HEAP_CORRUPTED_TEST((!CINUSE(u)), ETHeapBadCellAddress,u,0);
+ __HEAP_CORRUPTED_TEST((!NEXT_PINUSE(u)), ETHeapBadCellAddress,u,0);
+ __HEAP_CORRUPTED_TEST((u->iFd->iBk == u), ETHeapBadCellAddress,u,0);
+ __HEAP_CORRUPTED_TEST((u->iBk->iFd == u), ETHeapBadCellAddress,u,0);
+ if (u->iParent == 0)
+ {
+ __HEAP_CORRUPTED_TEST((u->iChild[0] == 0), ETHeapBadCellAddress,u,0);
+ __HEAP_CORRUPTED_TEST((u->iChild[1] == 0), ETHeapBadCellAddress,u,0);
+ }
+ else
+ {
+ __HEAP_CORRUPTED_TEST((head == 0), ETHeapBadCellAddress,u,0); /* only one node on chain has iParent */
+ head = u;
+ __HEAP_CORRUPTED_TEST((u->iParent != u), ETHeapBadCellAddress,u,0);
+ __HEAP_CORRUPTED_TEST( (u->iParent->iChild[0] == u ||
+ u->iParent->iChild[1] == u ||
+ *((tbinptr*)(u->iParent)) == u), ETHeapBadCellAddress,u,0);
+ if (u->iChild[0] != 0)
+ {
+ __HEAP_CORRUPTED_TEST((u->iChild[0]->iParent == u), ETHeapBadCellAddress,u,0);
+ __HEAP_CORRUPTED_TEST((u->iChild[0] != u), ETHeapBadCellAddress,u,0);
+ DoCheckTree(m, u->iChild[0]);
+ }
+ if (u->iChild[1] != 0)
+ {
+ __HEAP_CORRUPTED_TEST((u->iChild[1]->iParent == u), ETHeapBadCellAddress,u,0);
+ __HEAP_CORRUPTED_TEST((u->iChild[1] != u), ETHeapBadCellAddress,u,0);
+ DoCheckTree(m, u->iChild[1]);
+ }
+ if (u->iChild[0] != 0 && u->iChild[1] != 0)
+ {
+ __HEAP_CORRUPTED_TEST((CHUNKSIZE(u->iChild[0]) < CHUNKSIZE(u->iChild[1])), ETHeapBadCellAddress,u,0);
+ }
+ }
+ u = u->iFd;
+ }
+ while (u != t);
+ __HEAP_CORRUPTED_TEST((head != 0), ETHeapBadCellAddress,u,0);
+}
+
+/* Check all the chunks in a treebin. */
+void RHybridHeap::DoCheckTreebin(mstate m, bindex_t i)
+{
+ tbinptr* tb = TREEBIN_AT(m, i);
+ tchunkptr t = *tb;
+ int empty = (m->iTreeMap & (1U << i)) == 0;
+ if (t == 0)
+ __HEAP_CORRUPTED_TEST((empty), ETHeapBadCellAddress,t,0);
+ if (!empty)
+ DoCheckTree(m, t);
+}
+
+/* Check all the chunks in a smallbin. */
+void RHybridHeap::DoCheckSmallbin(mstate m, bindex_t i)
+{
+ sbinptr b = SMALLBIN_AT(m, i);
+ mchunkptr p = b->iBk;
+ unsigned int empty = (m->iSmallMap & (1U << i)) == 0;
+ if (p == b)
+ __HEAP_CORRUPTED_TEST((empty), ETHeapBadCellAddress,p,0);
+ if (!empty)
+ {
+ for (; p != b; p = p->iBk)
+ {
+ size_t size = CHUNKSIZE(p);
+ mchunkptr q;
+ /* each chunk claims to be free */
+ DoCheckFreeChunk(m, p);
+ /* chunk belongs in bin */
+ __HEAP_CORRUPTED_TEST((SMALL_INDEX(size) == i), ETHeapBadCellAddress,p,0);
+ __HEAP_CORRUPTED_TEST((p->iBk == b || CHUNKSIZE(p->iBk) == CHUNKSIZE(p)), ETHeapBadCellAddress,p,0);
+ /* chunk is followed by an inuse chunk */
+ q = NEXT_CHUNK(p);
+ if (q->iHead != FENCEPOST_HEAD)
+ DoCheckInuseChunk(m, q);
+ }
+ }
+}
+
+/* Find x in a bin. Used in other check functions. */
+TInt RHybridHeap::BinFind(mstate m, mchunkptr x)
+{
+ size_t size = CHUNKSIZE(x);
+ if (IS_SMALL(size))
+ {
+ bindex_t sidx = SMALL_INDEX(size);
+ sbinptr b = SMALLBIN_AT(m, sidx);
+ if (SMALLMAP_IS_MARKED(m, sidx))
+ {
+ mchunkptr p = b;
+ do
+ {
+ if (p == x)
+ return 1;
+ }
+ while ((p = p->iFd) != b);
+ }
+ }
+ else
+ {
+ bindex_t tidx;
+ DoComputeTreeIndex(size, tidx);
+ if (TREEMAP_IS_MARKED(m, tidx))
+ {
+ tchunkptr t = *TREEBIN_AT(m, tidx);
+ size_t sizebits = size << LEFTSHIFT_FOR_TREE_INDEX(tidx);
+ while (t != 0 && CHUNKSIZE(t) != size)
+ {
+ t = t->iChild[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
+ sizebits <<= 1;
+ }
+ if (t != 0)
+ {
+ tchunkptr u = t;
+ do
+ {
+ if (u == (tchunkptr)x)
+ return 1;
+ }
+ while ((u = u->iFd) != t);
+ }
+ }
+ }
+ return 0;
+}
+
+/* Traverse each chunk and check it; return total */
+size_t RHybridHeap::TraverseAndCheck(mstate m)
+{
+ size_t sum = 0;
+ msegmentptr s = &m->iSeg;
+ sum += m->iTopSize + TOP_FOOT_SIZE;
+ mchunkptr q = ALIGN_AS_CHUNK(s->iBase);
+ mchunkptr lastq = 0;
+ __HEAP_CORRUPTED_TEST((PINUSE(q)), ETHeapBadCellAddress,q,0);
+ while (q != m->iTop && q->iHead != FENCEPOST_HEAD)
+ {
+ sum += CHUNKSIZE(q);
+ if (CINUSE(q))
+ {
+ __HEAP_CORRUPTED_TEST((!BinFind(m, q)), ETHeapBadCellAddress,q,0);
+ DoCheckInuseChunk(m, q);
+ }
+ else
+ {
+ __HEAP_CORRUPTED_TEST((q == m->iDv || BinFind(m, q)), ETHeapBadCellAddress,q,0);
+ __HEAP_CORRUPTED_TEST((lastq == 0 || CINUSE(lastq)), ETHeapBadCellAddress,q,0); /* Not 2 consecutive free */
+ DoCheckFreeChunk(m, q);
+ }
+ lastq = q;
+ q = NEXT_CHUNK(q);
+ }
+ return sum;
+}
+
+/* Check all properties of malloc_state. */
+void RHybridHeap::DoCheckMallocState(mstate m)
+{
+ bindex_t i;
+// size_t total;
+ /* check bins */
+ for (i = 0; i < NSMALLBINS; ++i)
+ DoCheckSmallbin(m, i);
+ for (i = 0; i < NTREEBINS; ++i)
+ DoCheckTreebin(m, i);
+
+ if (m->iDvSize != 0)
+ { /* check iDv chunk */
+ DoCheckAnyChunk(m, m->iDv);
+ __HEAP_CORRUPTED_TEST((m->iDvSize == CHUNKSIZE(m->iDv)), ETHeapBadCellAddress,m->iDv,0);
+ __HEAP_CORRUPTED_TEST((m->iDvSize >= MIN_CHUNK_SIZE), ETHeapBadCellAddress,m->iDv,0);
+ __HEAP_CORRUPTED_TEST((BinFind(m, m->iDv) == 0), ETHeapBadCellAddress,m->iDv,0);
+ }
+
+ if (m->iTop != 0)
+ { /* check iTop chunk */
+ DoCheckTopChunk(m, m->iTop);
+ __HEAP_CORRUPTED_TEST((m->iTopSize == CHUNKSIZE(m->iTop)), ETHeapBadCellAddress,m->iTop,0);
+ __HEAP_CORRUPTED_TEST((m->iTopSize > 0), ETHeapBadCellAddress,m->iTop,0);
+ __HEAP_CORRUPTED_TEST((BinFind(m, m->iTop) == 0), ETHeapBadCellAddress,m->iTop,0);
+ }
+
+// total =
+ TraverseAndCheck(m);
+}
+
+#ifndef __KERNEL_MODE__
+//
+// Methods for Slab allocator detailed check
+//
+void RHybridHeap::DoCheckSlabTree(slab** aS, TBool aPartialPage)
+{
+ slab* s = *aS;
+ if (!s)
+ return;
+
+ TUint size = SlabHeaderSize(s->iHeader);
+ slab** parent = aS;
+ slab** child2 = &s->iChild2;
+
+ while ( s )
+ {
+ __HEAP_CORRUPTED_TEST((s->iParent == parent), ETHeapBadCellAddress,s,SLABSIZE);
+ __HEAP_CORRUPTED_TEST((!s->iChild1 || s < s->iChild1), ETHeapBadCellAddress,s,SLABSIZE);
+ __HEAP_CORRUPTED_TEST((!s->iChild2 || s < s->iChild2), ETHeapBadCellAddress,s,SLABSIZE);
+
+ if ( aPartialPage )
+ {
+ if ( s->iChild1 )
+ size = SlabHeaderSize(s->iChild1->iHeader);
+ }
+ else
+ {
+ __HEAP_CORRUPTED_TEST((SlabHeaderSize(s->iHeader) == size), ETHeapBadCellAddress,s,SLABSIZE);
+ }
+ parent = &s->iChild1;
+ s = s->iChild1;
+
+ }
+
+ parent = child2;
+ s = *child2;
+
+ while ( s )
+ {
+ __HEAP_CORRUPTED_TEST((s->iParent == parent), ETHeapBadCellAddress,s,SLABSIZE);
+ __HEAP_CORRUPTED_TEST((!s->iChild1 || s < s->iChild1), ETHeapBadCellAddress,s,SLABSIZE);
+ __HEAP_CORRUPTED_TEST((!s->iChild2 || s < s->iChild2), ETHeapBadCellAddress,s,SLABSIZE);
+
+ if ( aPartialPage )
+ {
+ if ( s->iChild2 )
+ size = SlabHeaderSize(s->iChild2->iHeader);
+ }
+ else
+ {
+ __HEAP_CORRUPTED_TEST((SlabHeaderSize(s->iHeader) == size), ETHeapBadCellAddress,s,SLABSIZE);
+ }
+ parent = &s->iChild2;
+ s = s->iChild2;
+
+ }
+
+}
+
+void RHybridHeap::DoCheckSlabTrees()
+{
+ for (TInt i = 0; i < (MAXSLABSIZE>>2); ++i)
+ DoCheckSlabTree(&iSlabAlloc[i].iPartial, EFalse);
+ DoCheckSlabTree(&iPartialPage, ETrue);
+}
+
+void RHybridHeap::DoCheckSlab(slab* aSlab, TAllocatorType aSlabType, TAny* aBfr)
+{
+ if ( (aSlabType == ESlabSpare) || (aSlabType == EEmptySlab) )
+ return;
+
+ unsigned h = aSlab->iHeader;
+ __HEAP_CORRUPTED_TEST((ZEROBITS(h)), ETHeapBadCellAddress,aBfr,aSlab);
+ unsigned used = SlabHeaderUsedm4(h)+4;
+ unsigned size = SlabHeaderSize(h);
+ __HEAP_CORRUPTED_TEST( (used < SLABSIZE),ETHeapBadCellAddress, aBfr, aSlab);
+ __HEAP_CORRUPTED_TEST( ((size > 3 ) && (size <= MAXSLABSIZE)), ETHeapBadCellAddress,aBfr,aSlab);
+ unsigned count = 0;
+
+ switch ( aSlabType )
+ {
+ case EFullSlab:
+ count = (KMaxSlabPayload / size );
+ __HEAP_CORRUPTED_TEST((used == count*size), ETHeapBadCellAddress,aBfr,aSlab);
+ __HEAP_CORRUPTED_TEST((HeaderFloating(h)), ETHeapBadCellAddress,aBfr,aSlab);
+ break;
+
+ case EPartialFullSlab:
+ __HEAP_CORRUPTED_TEST(((used % size)==0),ETHeapBadCellAddress,aBfr,aSlab);
+ __HEAP_CORRUPTED_TEST(((SlabHeaderFree(h) == 0) || (((SlabHeaderFree(h)<<2)-sizeof(slabhdr)) % SlabHeaderSize(h) == 0)),
+ ETHeapBadCellAddress,aBfr,aSlab);
+ break;
+
+ default:
+ break;
+
+ }
+}
+
+//
+// Check that committed size in heap equals number of pages in bitmap
+// plus size of Doug Lea region
+//
+void RHybridHeap::DoCheckCommittedSize(TInt aNPages, mstate aM)
+{
+ TInt total_committed = (aNPages * iPageSize) + aM->iSeg.iSize + (iBase - (TUint8*)this);
+ __HEAP_CORRUPTED_TEST((total_committed == iChunkSize), ETHeapBadCellAddress,total_committed,iChunkSize);
+}
+
+#endif // __KERNEL_MODE__
+
+#endif /* QT_USE_NEW_SYMBIAN_ALLOCATOR */
diff --git a/src/corelib/arch/symbian/dla_p.h b/src/corelib/arch/symbian/dla_p.h
new file mode 100644
index 0000000000..c2fa99a087
--- /dev/null
+++ b/src/corelib/arch/symbian/dla_p.h
@@ -0,0 +1,969 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef __DLA__
+#define __DLA__
+
+#define DEFAULT_TRIM_THRESHOLD ((size_t)4U * (size_t)1024U)
+
+#define MSPACES 0
+#define HAVE_MORECORE 1
+#define MORECORE_CONTIGUOUS 1
+#define HAVE_MMAP 0
+#define HAVE_MREMAP 0
+#define DEFAULT_GRANULARITY (4096U)
+#define FOOTERS 0
+#define USE_LOCKS 0
+#define INSECURE 1
+#define NO_MALLINFO 0
+
+#define LACKS_SYS_TYPES_H
+#ifndef LACKS_SYS_TYPES_H
+#include <sys/types.h> /* For size_t */
+#else
+#ifndef _SIZE_T_DECLARED
+typedef unsigned int size_t;
+#define _SIZE_T_DECLARED
+#endif
+#endif /* LACKS_SYS_TYPES_H */
+
+/* The maximum possible size_t value has all bits set */
+#define MAX_SIZE_T (~(size_t)0)
+
+#ifndef ONLY_MSPACES
+ #define ONLY_MSPACES 0
+#endif /* ONLY_MSPACES */
+
+#ifndef MSPACES
+ #if ONLY_MSPACES
+ #define MSPACES 1
+ #else /* ONLY_MSPACES */
+ #define MSPACES 0
+ #endif /* ONLY_MSPACES */
+#endif /* MSPACES */
+
+//#ifndef MALLOC_ALIGNMENT
+// #define MALLOC_ALIGNMENT ((size_t)8U)
+//#endif /* MALLOC_ALIGNMENT */
+
+#ifndef FOOTERS
+ #define FOOTERS 0
+#endif /* FOOTERS */
+
+#ifndef ABORT
+// #define ABORT abort()
+// #define ABORT User::Invariant() // redefined so euser isn't dependent on oe
+ #define ABORT HEAP_PANIC(ETHeapBadCellAddress)
+#endif /* ABORT */
+
+#ifndef PROCEED_ON_ERROR
+ #define PROCEED_ON_ERROR 0
+#endif /* PROCEED_ON_ERROR */
+
+#ifndef USE_LOCKS
+ #define USE_LOCKS 0
+#endif /* USE_LOCKS */
+
+#ifndef INSECURE
+ #define INSECURE 0
+#endif /* INSECURE */
+
+#ifndef HAVE_MMAP
+ #define HAVE_MMAP 1
+#endif /* HAVE_MMAP */
+
+#ifndef MMAP_CLEARS
+ #define MMAP_CLEARS 1
+#endif /* MMAP_CLEARS */
+
+#ifndef HAVE_MREMAP
+ #ifdef linux
+ #define HAVE_MREMAP 1
+ #else /* linux */
+ #define HAVE_MREMAP 0
+ #endif /* linux */
+#endif /* HAVE_MREMAP */
+
+#ifndef MALLOC_FAILURE_ACTION
+ //#define MALLOC_FAILURE_ACTION errno = ENOMEM;
+ #define MALLOC_FAILURE_ACTION ;
+#endif /* MALLOC_FAILURE_ACTION */
+
+#ifndef HAVE_MORECORE
+ #if ONLY_MSPACES
+ #define HAVE_MORECORE 1 /*AMOD: has changed */
+ #else /* ONLY_MSPACES */
+ #define HAVE_MORECORE 1
+ #endif /* ONLY_MSPACES */
+#endif /* HAVE_MORECORE */
+
+#if !HAVE_MORECORE
+ #define MORECORE_CONTIGUOUS 0
+#else /* !HAVE_MORECORE */
+ #ifndef MORECORE
+ #define MORECORE DLAdjust
+ #endif /* MORECORE */
+ #ifndef MORECORE_CONTIGUOUS
+ #define MORECORE_CONTIGUOUS 0
+ #endif /* MORECORE_CONTIGUOUS */
+#endif /* !HAVE_MORECORE */
+
+#ifndef DEFAULT_GRANULARITY
+ #if MORECORE_CONTIGUOUS
+ #define DEFAULT_GRANULARITY 4096 /* 0 means to compute in init_mparams */
+ #else /* MORECORE_CONTIGUOUS */
+ #define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
+ #endif /* MORECORE_CONTIGUOUS */
+#endif /* DEFAULT_GRANULARITY */
+
+#ifndef DEFAULT_TRIM_THRESHOLD
+ #ifndef MORECORE_CANNOT_TRIM
+ #define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
+ #else /* MORECORE_CANNOT_TRIM */
+ #define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
+ #endif /* MORECORE_CANNOT_TRIM */
+#endif /* DEFAULT_TRIM_THRESHOLD */
+
+#ifndef DEFAULT_MMAP_THRESHOLD
+ #if HAVE_MMAP
+ #define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
+ #else /* HAVE_MMAP */
+ #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
+ #endif /* HAVE_MMAP */
+#endif /* DEFAULT_MMAP_THRESHOLD */
+
+#ifndef USE_BUILTIN_FFS
+ #define USE_BUILTIN_FFS 0
+#endif /* USE_BUILTIN_FFS */
+
+#ifndef USE_DEV_RANDOM
+ #define USE_DEV_RANDOM 0
+#endif /* USE_DEV_RANDOM */
+
+#ifndef NO_MALLINFO
+ #define NO_MALLINFO 0
+#endif /* NO_MALLINFO */
+#ifndef MALLINFO_FIELD_TYPE
+ #define MALLINFO_FIELD_TYPE size_t
+#endif /* MALLINFO_FIELD_TYPE */
+
+/*
+ mallopt tuning options. SVID/XPG defines four standard parameter
+ numbers for mallopt, normally defined in malloc.h. None of these
+ are used in this malloc, so setting them has no effect. But this
+ malloc does support the following options.
+*/
+
+#define M_TRIM_THRESHOLD (-1)
+#define M_GRANULARITY (-2)
+#define M_MMAP_THRESHOLD (-3)
+
+#if !NO_MALLINFO
+/*
+ This version of malloc supports the standard SVID/XPG mallinfo
+ routine that returns a struct containing usage properties and
+ statistics. It should work on any system that has a
+ /usr/include/malloc.h defining struct mallinfo. The main
+ declaration needed is the mallinfo struct that is returned (by-copy)
+ by mallinfo(). The mallinfo struct contains a bunch of fields that
+ are not even meaningful in this version of malloc. These fields
+ are instead filled by mallinfo() with other numbers that might be of
+ interest.
+
+ HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
+ /usr/include/malloc.h file that includes a declaration of struct
+ mallinfo. If so, it is included; else a compliant version is
+ declared below. These must be precisely the same for mallinfo() to
+ work. The original SVID version of this struct, defined on most
+ systems with mallinfo, declares all fields as ints. But some others
+ define as unsigned long. If your system defines the fields using a
+ type of different width than listed here, you MUST #include your
+ system version and #define HAVE_USR_INCLUDE_MALLOC_H.
+*/
+
+/* #define HAVE_USR_INCLUDE_MALLOC_H */
+
+#ifdef HAVE_USR_INCLUDE_MALLOC_H
+#include "/usr/include/malloc.h"
+#else /* HAVE_USR_INCLUDE_MALLOC_H */
+
+struct mallinfo {
+ MALLINFO_FIELD_TYPE iArena; /* non-mmapped space allocated from system */
+ MALLINFO_FIELD_TYPE iOrdblks; /* number of free chunks */
+ MALLINFO_FIELD_TYPE iSmblks; /* always 0 */
+ MALLINFO_FIELD_TYPE iHblks; /* always 0 */
+ MALLINFO_FIELD_TYPE iHblkhd; /* space in mmapped regions */
+ MALLINFO_FIELD_TYPE iUsmblks; /* maximum total allocated space */
+ MALLINFO_FIELD_TYPE iFsmblks; /* always 0 */
+ MALLINFO_FIELD_TYPE iUordblks; /* total allocated space */
+ MALLINFO_FIELD_TYPE iFordblks; /* total free space */
+ MALLINFO_FIELD_TYPE iKeepcost; /* releasable (via malloc_trim) space */
+ MALLINFO_FIELD_TYPE iCellCount;/* Number of chunks allocated*/
+};
+
+#endif /* HAVE_USR_INCLUDE_MALLOC_H */
+#endif /* NO_MALLINFO */
+
+#if MSPACES
+ typedef void* mspace;
+#endif /* MSPACES */
+
+#if 0
+
+#include <stdio.h>/* for printing in malloc_stats */
+
+#ifndef LACKS_ERRNO_H
+ #include <errno.h> /* for MALLOC_FAILURE_ACTION */
+#endif /* LACKS_ERRNO_H */
+
+#if FOOTERS
+ #include <time.h> /* for iMagic initialization */
+#endif /* FOOTERS */
+
+#ifndef LACKS_STDLIB_H
+ #include <stdlib.h> /* for abort() */
+#endif /* LACKS_STDLIB_H */
+
+#if !defined(ASSERT)
+#define ASSERT(x) __ASSERT_DEBUG(x, HEAP_PANIC(ETHeapBadCellAddress))
+#endif
+
+#ifndef LACKS_STRING_H
+ #include <string.h> /* for memset etc */
+#endif /* LACKS_STRING_H */
+
+#if USE_BUILTIN_FFS
+ #ifndef LACKS_STRINGS_H
+ #include <strings.h> /* for ffs */
+ #endif /* LACKS_STRINGS_H */
+#endif /* USE_BUILTIN_FFS */
+
+#if HAVE_MMAP
+ #ifndef LACKS_SYS_MMAN_H
+ #include <sys/mman.h> /* for mmap */
+ #endif /* LACKS_SYS_MMAN_H */
+ #ifndef LACKS_FCNTL_H
+ #include <fcntl.h>
+ #endif /* LACKS_FCNTL_H */
+#endif /* HAVE_MMAP */
+
+#if HAVE_MORECORE
+ #ifndef LACKS_UNISTD_H
+ #include <unistd.h> /* for sbrk */
+ extern void* sbrk(size_t);
+ #else /* LACKS_UNISTD_H */
+ #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
+ extern void* sbrk(ptrdiff_t);
+ /*Amod sbrk is not defined in WIN32 need to check in symbian*/
+ #endif /* FreeBSD etc */
+ #endif /* LACKS_UNISTD_H */
+#endif /* HAVE_MORECORE */
+
+#endif
+
+/*AMOD: For MALLOC_GETPAGESIZE*/
+#if 0 // replaced with GET_PAGE_SIZE() defined in heap.cpp
+#ifndef WIN32
+ #ifndef MALLOC_GETPAGESIZE
+ #ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
+ #ifndef _SC_PAGE_SIZE
+ #define _SC_PAGE_SIZE _SC_PAGESIZE
+ #endif
+ #endif
+ #ifdef _SC_PAGE_SIZE
+ #define MALLOC_GETPAGESIZE sysconf(_SC_PAGE_SIZE)
+ #else
+ #if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
+ extern size_t getpagesize();
+ #define MALLOC_GETPAGESIZE getpagesize()
+ #else
+ #ifdef WIN32 /* use supplied emulation of getpagesize */
+ #define MALLOC_GETPAGESIZE getpagesize()
+ #else
+ #ifndef LACKS_SYS_PARAM_H
+ #include <sys/param.h>
+ #endif
+ #ifdef EXEC_PAGESIZE
+ #define MALLOC_GETPAGESIZE EXEC_PAGESIZE
+ #else
+ #ifdef NBPG
+ #ifndef CLSIZE
+ #define MALLOC_GETPAGESIZE NBPG
+ #else
+ #define MALLOC_GETPAGESIZE (NBPG * CLSIZE)
+ #endif
+ #else
+ #ifdef NBPC
+ #define MALLOC_GETPAGESIZE NBPC
+ #else
+ #ifdef PAGESIZE
+ #define MALLOC_GETPAGESIZE PAGESIZE
+ #else /* just guess */
+ #define MALLOC_GETPAGESIZE ((size_t)4096U)
+ #endif
+ #endif
+ #endif
+ #endif
+ #endif
+ #endif
+ #endif
+ #endif
+#endif
+#endif
+/*AMOD: For MALLOC_GETPAGESIZE*/
+
+/* ------------------- size_t and alignment properties -------------------- */
+
+/* The byte and bit size of a size_t */
+#define SIZE_T_SIZE (sizeof(size_t))
+#define SIZE_T_BITSIZE (sizeof(size_t) << 3)
+
+/* Some constants coerced to size_t */
+/* Annoying but necessary to avoid errors on some platforms */
+#define SIZE_T_ZERO ((size_t)0)
+#define SIZE_T_ONE ((size_t)1)
+#define SIZE_T_TWO ((size_t)2)
+#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
+#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
+#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
+#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)
+
+/* The bit mask value corresponding to MALLOC_ALIGNMENT */
+#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
+
+/* True if address a has acceptable alignment */
+//#define IS_ALIGNED(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
+#define IS_ALIGNED(A) (((unsigned int)((A)) & (CHUNK_ALIGN_MASK)) == 0)
+
+/* the number of bytes to offset an address to align it */
+#define ALIGN_OFFSET(A)\
+ ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
+ ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
+
+/* -------------------------- MMAP preliminaries ------------------------- */
+
+/*
+ If HAVE_MORECORE or HAVE_MMAP is false, we just define calls and
+ checks that fail, so the compiler optimizer can delete code rather than
+ using so many "#if"s.
+*/
+
+
+/* MORECORE and MMAP must return MFAIL on failure */
+#define MFAIL ((void*)(MAX_SIZE_T))
+#define CMFAIL ((TUint8*)(MFAIL)) /* defined for convenience */
+
+#if !HAVE_MMAP
+ #define IS_MMAPPED_BIT (SIZE_T_ZERO)
+ #define USE_MMAP_BIT (SIZE_T_ZERO)
+ #define CALL_MMAP(s) MFAIL
+ #define CALL_MUNMAP(a, s) (-1)
+ #define DIRECT_MMAP(s) MFAIL
+#else /* !HAVE_MMAP */
+ #define IS_MMAPPED_BIT (SIZE_T_ONE)
+ #define USE_MMAP_BIT (SIZE_T_ONE)
+ #ifndef WIN32
+ #define CALL_MUNMAP(a, s) DLUMMAP((a),(s)) /*munmap((a), (s))*/
+ #define MMAP_PROT (PROT_READ|PROT_WRITE)
+ #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
+ #define MAP_ANONYMOUS MAP_ANON
+ #endif /* MAP_ANON */
+ #ifdef MAP_ANONYMOUS
+ #define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
+ #define CALL_MMAP(s) mmap(0, (s), MMAP_PROT, (int)MMAP_FLAGS, -1, 0)
+ #else /* MAP_ANONYMOUS */
+ /*
+ Nearly all versions of mmap support MAP_ANONYMOUS, so the following
+ is unlikely to be needed, but is supplied just in case.
+ */
+ #define MMAP_FLAGS (MAP_PRIVATE)
+ //static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
+ #define CALL_MMAP(s) DLMMAP(s)
+ /*#define CALL_MMAP(s) ((dev_zero_fd < 0) ? \
+ (dev_zero_fd = open("/dev/zero", O_RDWR), \
+ mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
+ mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
+ */
+ #define CALL_REMAP(a, s, d) DLREMAP((a),(s),(d))
+ #endif /* MAP_ANONYMOUS */
+ #define DIRECT_MMAP(s) CALL_MMAP(s)
+ #else /* WIN32 */
+ #define CALL_MMAP(s) win32mmap(s)
+ #define CALL_MUNMAP(a, s) win32munmap((a), (s))
+ #define DIRECT_MMAP(s) win32direct_mmap(s)
+ #endif /* WIN32 */
+#endif /* HAVE_MMAP */
+
+#if HAVE_MMAP && HAVE_MREMAP
+ #define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
+#else /* HAVE_MMAP && HAVE_MREMAP */
+ #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
+#endif /* HAVE_MMAP && HAVE_MREMAP */
+
+#if HAVE_MORECORE
+ #define CALL_MORECORE(S) SetBrk(S)
+#else /* HAVE_MORECORE */
+ #define CALL_MORECORE(S) MFAIL
+#endif /* HAVE_MORECORE */
+
+/* mstate bit set if contiguous morecore disabled or failed */
+#define USE_NONCONTIGUOUS_BIT (4U)
+
+/* segment bit set in create_mspace_with_base */
+#define EXTERN_BIT (8U)
+
+
+#if USE_LOCKS
+/*
+ When locks are defined, there are up to two global locks:
+ * If HAVE_MORECORE, iMorecoreMutex protects sequences of calls to
+ MORECORE. In many cases sys_alloc requires two calls that should
+ not be interleaved with calls by other threads. This does not
+ protect against direct calls to MORECORE by other threads not
+ using this lock, so there is still code to cope as best we can with
+ interference.
+ * iMagicInitMutex ensures that mparams.iMagic and other
+ unique mparams values are initialized only once.
+*/
+ #ifndef WIN32
+ /* By default use posix locks */
+ #include <pthread.h>
+ #define MLOCK_T pthread_mutex_t
+ #define INITIAL_LOCK(l) pthread_mutex_init(l, NULL)
+ #define ACQUIRE_LOCK(l) pthread_mutex_lock(l)
+ #define RELEASE_LOCK(l) pthread_mutex_unlock(l)
+
+ #if HAVE_MORECORE
+ //static MLOCK_T iMorecoreMutex = PTHREAD_MUTEX_INITIALIZER;
+ #endif /* HAVE_MORECORE */
+ //static MLOCK_T iMagicInitMutex = PTHREAD_MUTEX_INITIALIZER;
+ #else /* WIN32 */
+ #define MLOCK_T long
+ #define INITIAL_LOCK(l) *(l)=0
+ #define ACQUIRE_LOCK(l) win32_acquire_lock(l)
+ #define RELEASE_LOCK(l) win32_release_lock(l)
+ #if HAVE_MORECORE
+ static MLOCK_T iMorecoreMutex;
+ #endif /* HAVE_MORECORE */
+ static MLOCK_T iMagicInitMutex;
+ #endif /* WIN32 */
+ #define USE_LOCK_BIT (2U)
+#else /* USE_LOCKS */
+ #define USE_LOCK_BIT (0U)
+ #define INITIAL_LOCK(l)
+#endif /* USE_LOCKS */
+
+#if USE_LOCKS && HAVE_MORECORE
+ #define ACQUIRE_MORECORE_LOCK(M) ACQUIRE_LOCK((M->iMorecoreMutex)/*&iMorecoreMutex*/);
+ #define RELEASE_MORECORE_LOCK(M) RELEASE_LOCK((M->iMorecoreMutex)/*&iMorecoreMutex*/);
+#else /* USE_LOCKS && HAVE_MORECORE */
+ #define ACQUIRE_MORECORE_LOCK(M)
+ #define RELEASE_MORECORE_LOCK(M)
+#endif /* USE_LOCKS && HAVE_MORECORE */
+
+#if USE_LOCKS
+ /* Currently not supporting this */
+ #define ACQUIRE_MAGIC_INIT_LOCK(M) ACQUIRE_LOCK(((M)->iMagicInitMutex));
+ //AMOD: changed #define ACQUIRE_MAGIC_INIT_LOCK()
+ //#define RELEASE_MAGIC_INIT_LOCK()
+ #define RELEASE_MAGIC_INIT_LOCK(M) RELEASE_LOCK(((M)->iMagicInitMutex));
+#else /* USE_LOCKS */
+ #define ACQUIRE_MAGIC_INIT_LOCK(M)
+ #define RELEASE_MAGIC_INIT_LOCK(M)
+#endif /* USE_LOCKS */
+
+/*CHUNK representation*/
+struct malloc_chunk {
+ size_t iPrevFoot; /* Size of previous chunk (if free). */
+ size_t iHead; /* Size and inuse bits. */
+ struct malloc_chunk* iFd; /* double links -- used only if free. */
+ struct malloc_chunk* iBk;
+};
+
+typedef struct malloc_chunk mchunk;
+typedef struct malloc_chunk* mchunkptr;
+typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */
+typedef unsigned int bindex_t; /* Described below */
+typedef unsigned int binmap_t; /* Described below */
+typedef unsigned int flag_t; /* The type of various bit flag sets */
+
+
+/* ------------------- Chunks sizes and alignments ----------------------- */
+#define MCHUNK_SIZE (sizeof(mchunk))
+
+//#if FOOTERS
+// #define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
+//#else /* FOOTERS */
+// #define CHUNK_OVERHEAD (SIZE_T_SIZE)
+//#endif /* FOOTERS */
+
+/* MMapped chunks need a second word of overhead ... */
+#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
+/* ... and additional padding for fake next-chunk at foot */
+#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)
+
+/* The smallest size we can malloc is an aligned minimal chunk */
+#define MIN_CHUNK_SIZE ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
+
+/* conversion from malloc headers to user pointers, and back */
+#define CHUNK2MEM(p) ((void*)((TUint8*)(p) + TWO_SIZE_T_SIZES))
+#define MEM2CHUNK(mem) ((mchunkptr)((TUint8*)(mem) - TWO_SIZE_T_SIZES))
+/* chunk associated with aligned address A */
+#define ALIGN_AS_CHUNK(A) (mchunkptr)((A) + ALIGN_OFFSET(CHUNK2MEM(A)))
+
+/* Bounds on request (not chunk) sizes. */
+#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
+#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
+
+/* pad request bytes into a usable size */
+#define PAD_REQUEST(req) (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
+
+/* pad request, checking for minimum (but not maximum) */
+#define REQUEST2SIZE(req) (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : PAD_REQUEST(req))
+
+/* ------------------ Operations on iHead and foot fields ----------------- */
+
+/*
+ The iHead field of a chunk is or'ed with PINUSE_BIT when previous
+ adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in
+ use. If the chunk was obtained with mmap, the iPrevFoot field has
+ IS_MMAPPED_BIT set, otherwise holding the offset of the base of the
+ mmapped region to the base of the chunk.
+*/
+#define PINUSE_BIT (SIZE_T_ONE)
+#define CINUSE_BIT (SIZE_T_TWO)
+#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)
+
+/* Head value for fenceposts */
+#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)
+
+/* extraction of fields from iHead words */
+#define CINUSE(p) ((p)->iHead & CINUSE_BIT)
+#define PINUSE(p) ((p)->iHead & PINUSE_BIT)
+#define CHUNKSIZE(p) ((p)->iHead & ~(INUSE_BITS))
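+/*
+ A small worked example of this encoding (illustrative values only): a
+ free 64-byte chunk whose previous neighbour is in use stores
+ iHead = 64 | PINUSE_BIT = 0x41, so CHUNKSIZE(p) masks off the flag bits
+ and yields 64, PINUSE(p) is non-zero and CINUSE(p) is zero. Once the
+ chunk is allocated, iHead becomes 64 | PINUSE_BIT | CINUSE_BIT = 0x43.
+*/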
+
+#define CLEAR_PINUSE(p) ((p)->iHead &= ~PINUSE_BIT)
+#define CLEAR_CINUSE(p) ((p)->iHead &= ~CINUSE_BIT)
+
+/* Treat space at ptr +/- offset as a chunk */
+#define CHUNK_PLUS_OFFSET(p, s) ((mchunkptr)(((TUint8*)(p)) + (s)))
+#define CHUNK_MINUS_OFFSET(p, s) ((mchunkptr)(((TUint8*)(p)) - (s)))
+
+/* Ptr to next or previous physical malloc_chunk. */
+#define NEXT_CHUNK(p) ((mchunkptr)( ((TUint8*)(p)) + ((p)->iHead & ~INUSE_BITS)))
+#define PREV_CHUNK(p) ((mchunkptr)( ((TUint8*)(p)) - ((p)->iPrevFoot) ))
+
+/* extract next chunk's PINUSE bit */
+#define NEXT_PINUSE(p) ((NEXT_CHUNK(p)->iHead) & PINUSE_BIT)
+
+/* Get/set size at footer */
+#define GET_FOOT(p, s) (((mchunkptr)((TUint8*)(p) + (s)))->iPrevFoot)
+#define SET_FOOT(p, s) (((mchunkptr)((TUint8*)(p) + (s)))->iPrevFoot = (s))
+
+/* Set size, PINUSE bit, and foot */
+#define SET_SIZE_AND_PINUSE_OF_FREE_CHUNK(p, s) ((p)->iHead = (s|PINUSE_BIT), SET_FOOT(p, s))
+
+/* Set size, PINUSE bit, foot, and clear next PINUSE */
+#define SET_FREE_WITH_PINUSE(p, s, n) (CLEAR_PINUSE(n), SET_SIZE_AND_PINUSE_OF_FREE_CHUNK(p, s))
+
+#define IS_MMAPPED(p) (!((p)->iHead & PINUSE_BIT) && ((p)->iPrevFoot & IS_MMAPPED_BIT))
+
+/* Get the internal overhead associated with chunk p */
+#define OVERHEAD_FOR(p) (IS_MMAPPED(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
+
+/* Return true if malloced space is not necessarily cleared */
+#if MMAP_CLEARS
+ #define CALLOC_MUST_CLEAR(p) (!IS_MMAPPED(p))
+#else /* MMAP_CLEARS */
+ #define CALLOC_MUST_CLEAR(p) (1)
+#endif /* MMAP_CLEARS */
+
+/* ---------------------- Overlaid data structures ----------------------- */
+struct malloc_tree_chunk {
+ /* The first four fields must be compatible with malloc_chunk */
+ size_t iPrevFoot;
+ size_t iHead;
+ struct malloc_tree_chunk* iFd;
+ struct malloc_tree_chunk* iBk;
+
+ struct malloc_tree_chunk* iChild[2];
+ struct malloc_tree_chunk* iParent;
+ bindex_t iIndex;
+};
+
+typedef struct malloc_tree_chunk tchunk;
+typedef struct malloc_tree_chunk* tchunkptr;
+typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */
+
+/* A little helper macro for trees */
+#define LEFTMOST_CHILD(t) ((t)->iChild[0] != 0? (t)->iChild[0] : (t)->iChild[1])
+/* Segment structure */
+//struct malloc_segment {
+// TUint8* iBase; /* base address */
+// size_t iSize; /* allocated size */
+//};
+
+#define IS_MMAPPED_SEGMENT(S) ((S)->iSflags & IS_MMAPPED_BIT)
+#define IS_EXTERN_SEGMENT(S) ((S)->iSflags & EXTERN_BIT)
+
+typedef struct malloc_segment msegment;
+typedef struct malloc_segment* msegmentptr;
+
+/* Malloc State data structure */
+
+//#define NSMALLBINS (32U)
+//#define NTREEBINS (32U)
+#define SMALLBIN_SHIFT (3U)
+#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
+#define TREEBIN_SHIFT (8U)
+#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
+#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
+#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
+
+/*struct malloc_state {
+ binmap_t iSmallMap;
+ binmap_t iTreeMap;
+ size_t iDvSize;
+ size_t iTopSize;
+ mchunkptr iDv;
+ mchunkptr iTop;
+ size_t iTrimCheck;
+ mchunkptr iSmallBins[(NSMALLBINS+1)*2];
+ tbinptr iTreeBins[NTREEBINS];
+ msegment iSeg;
+ };*/
+/*
+struct malloc_state {
+ binmap_t iSmallMap;
+ binmap_t iTreeMap;
+ size_t iDvSize;
+ size_t iTopSize;
+ TUint8* iLeastAddr;
+ mchunkptr iDv;
+ mchunkptr iTop;
+ size_t iTrimCheck;
+ size_t iMagic;
+ mchunkptr iSmallBins[(NSMALLBINS+1)*2];
+ tbinptr iTreeBins[NTREEBINS];
+ size_t iFootprint;
+ size_t iMaxFootprint;
+ flag_t iMflags;
+#if USE_LOCKS
+ MLOCK_T iMutex;
+ MLOCK_T iMagicInitMutex;
+ MLOCK_T iMorecoreMutex;
+#endif
+ msegment iSeg;
+};
+*/
+typedef struct malloc_state* mstate;
+
+/* ------------- Global malloc_state and malloc_params ------------------- */
+
+/*
+ malloc_params holds global properties, including those that can be
+ dynamically set using mallopt. There is a single instance, mparams,
+ initialized in init_mparams.
+*/
+
+struct malloc_params {
+ size_t iMagic;
+ size_t iPageSize;
+ size_t iGranularity;
+ size_t iMmapThreshold;
+ size_t iTrimThreshold;
+ flag_t iDefaultMflags;
+#if USE_LOCKS
+ MLOCK_T iMagicInitMutex;
+#endif /* USE_LOCKS */
+};
+
+/* The global malloc_state used for all non-"mspace" calls */
+/*AMOD: Need to check this as this will be a member of the class*/
+
+//static struct malloc_state _gm_;
+//#define GM (&_gm_)
+
+//#define IS_GLOBAL(M) ((M) == &_gm_)
+/*AMOD: has changed*/
+#define IS_GLOBAL(M) ((M) == GM)
+#define IS_INITIALIZED(M) ((M)->iTop != 0)
+
+/* -------------------------- system alloc setup ------------------------- */
+
+/* Operations on iMflags */
+
+#define USE_LOCK(M) ((M)->iMflags & USE_LOCK_BIT)
+#define ENABLE_LOCK(M) ((M)->iMflags |= USE_LOCK_BIT)
+#define DISABLE_LOCK(M) ((M)->iMflags &= ~USE_LOCK_BIT)
+
+#define USE_MMAP(M) ((M)->iMflags & USE_MMAP_BIT)
+#define ENABLE_MMAP(M) ((M)->iMflags |= USE_MMAP_BIT)
+#define DISABLE_MMAP(M) ((M)->iMflags &= ~USE_MMAP_BIT)
+
+#define USE_NONCONTIGUOUS(M) ((M)->iMflags & USE_NONCONTIGUOUS_BIT)
+#define DISABLE_CONTIGUOUS(M) ((M)->iMflags |= USE_NONCONTIGUOUS_BIT)
+
+#define SET_LOCK(M,L) ((M)->iMflags = (L)? ((M)->iMflags | USE_LOCK_BIT) : ((M)->iMflags & ~USE_LOCK_BIT))
+
+/* page-align a size */
+#define PAGE_ALIGN(S) (((S) + (mparams.iPageSize)) & ~(mparams.iPageSize - SIZE_T_ONE))
+
+/* iGranularity-align a size */
+#define GRANULARITY_ALIGN(S) (((S) + (mparams.iGranularity)) & ~(mparams.iGranularity - SIZE_T_ONE))
+
+#define IS_PAGE_ALIGNED(S) (((size_t)(S) & (mparams.iPageSize - SIZE_T_ONE)) == 0)
+#define IS_GRANULARITY_ALIGNED(S) (((size_t)(S) & (mparams.iGranularity - SIZE_T_ONE)) == 0)
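+/*
+ As a quick check of the rounding above, assuming a 4096-byte page size:
+ PAGE_ALIGN(1) = (1 + 4096) & ~4095 = 4096, and because the macro always
+ adds a full page before masking, PAGE_ALIGN(4096) = 8192 rather than
+ 4096. GRANULARITY_ALIGN behaves the same way with iGranularity.
+*/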
+
+/* True if segment S holds address A */
+#define SEGMENT_HOLDS(S, A) ((TUint8*)(A) >= S->iBase && (TUint8*)(A) < S->iBase + S->iSize)
+
+#ifndef MORECORE_CANNOT_TRIM
+ #define SHOULD_TRIM(M,s) ((s) > (M)->iTrimCheck)
+#else /* MORECORE_CANNOT_TRIM */
+ #define SHOULD_TRIM(M,s) (0)
+#endif /* MORECORE_CANNOT_TRIM */
+
+/*
+ TOP_FOOT_SIZE is padding at the end of a segment, including space
+ that may be needed to place segment records and fenceposts when new
+ noncontiguous segments are added.
+*/
+#define TOP_FOOT_SIZE (ALIGN_OFFSET(CHUNK2MEM(0))+PAD_REQUEST(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
+
+#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)
+/* ------------------------------- Hooks -------------------------------- */
+
+/*
+ PREACTION should be defined to return 0 on success, and nonzero on
+ failure. If you are not using locking, you can redefine these to do
+ anything you like.
+*/
+
+#if USE_LOCKS
+ /* Ensure locks are initialized */
+ #define GLOBALLY_INITIALIZE() (mparams.iPageSize == 0 && init_mparams())
+ #define PREACTION(M) (USE_LOCK((M))?(ACQUIRE_LOCK((M)->iMutex),0):0) /*Action to take like lock before alloc*/
+ #define POSTACTION(M) { if (USE_LOCK(M)) RELEASE_LOCK((M)->iMutex); }
+
+#else /* USE_LOCKS */
+ #ifndef PREACTION
+ #define PREACTION(M) (0)
+ #endif /* PREACTION */
+ #ifndef POSTACTION
+ #define POSTACTION(M)
+ #endif /* POSTACTION */
+#endif /* USE_LOCKS */
+
+/*
+ CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
+ USAGE_ERROR_ACTION is triggered on detected bad frees and
+ reallocs. The argument p is an address that might have triggered the
+ fault. It is ignored by the two predefined actions, but might be
+ useful in custom actions that try to help diagnose errors.
+*/
+
+#if PROCEED_ON_ERROR
+ /* A count of the number of corruption errors causing resets */
+ int malloc_corruption_error_count;
+ /* default corruption action */
+ static void ResetOnError(mstate m);
+ #define CORRUPTION_ERROR_ACTION(m) ResetOnError(m)
+ #define USAGE_ERROR_ACTION(m, p)
+#else /* PROCEED_ON_ERROR */
+ #ifndef CORRUPTION_ERROR_ACTION
+ #define CORRUPTION_ERROR_ACTION(m) ABORT
+ #endif /* CORRUPTION_ERROR_ACTION */
+ #ifndef USAGE_ERROR_ACTION
+ #define USAGE_ERROR_ACTION(m,p) ABORT
+ #endif /* USAGE_ERROR_ACTION */
+#endif /* PROCEED_ON_ERROR */
+
+
+#ifdef _DEBUG
+ #define CHECK_FREE_CHUNK(M,P) DoCheckFreeChunk(M,P)
+ #define CHECK_INUSE_CHUNK(M,P) DoCheckInuseChunk(M,P)
+ #define CHECK_TOP_CHUNK(M,P) DoCheckTopChunk(M,P)
+ #define CHECK_MALLOCED_CHUNK(M,P,N) DoCheckMallocedChunk(M,P,N)
+ #define CHECK_MMAPPED_CHUNK(M,P) DoCheckMmappedChunk(M,P)
+ #define CHECK_MALLOC_STATE(M) DoCheckMallocState(M)
+#else /* DEBUG */
+ #define CHECK_FREE_CHUNK(M,P)
+ #define CHECK_INUSE_CHUNK(M,P)
+ #define CHECK_MALLOCED_CHUNK(M,P,N)
+ #define CHECK_MMAPPED_CHUNK(M,P)
+ #define CHECK_MALLOC_STATE(M)
+ #define CHECK_TOP_CHUNK(M,P)
+#endif /* DEBUG */
+
+/* ---------------------------- Indexing Bins ---------------------------- */
+
+#define IS_SMALL(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
+#define SMALL_INDEX(s) ((s) >> SMALLBIN_SHIFT)
+#define SMALL_INDEX2SIZE(i) ((i) << SMALLBIN_SHIFT)
+#define MIN_SMALL_INDEX (SMALL_INDEX(MIN_CHUNK_SIZE))
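+/*
+ For instance, with SMALLBIN_SHIFT of 3 a 40-byte chunk lands in small
+ bin SMALL_INDEX(40) = 5 and SMALL_INDEX2SIZE(5) = 40 recovers the bin's
+ chunk size; IS_SMALL holds for all sizes below NSMALLBINS << SMALLBIN_SHIFT.
+*/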
+
+/* addressing by index. See above about smallbin repositioning */
+#define SMALLBIN_AT(M, i) ((sbinptr)((TUint8*)&((M)->iSmallBins[(i)<<1])))
+#define TREEBIN_AT(M,i) (&((M)->iTreeBins[i]))
+
+
+/* Bit representing maximum resolved size in a treebin at i */
+#define BIT_FOR_TREE_INDEX(i) (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
+
+/* Shift placing maximum resolved bit in a treebin at i as sign bit */
+#define LEFTSHIFT_FOR_TREE_INDEX(i) ((i == NTREEBINS-1)? 0 : ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
+
+/* The size of the smallest chunk held in bin with index i */
+#define MINSIZE_FOR_TREE_INDEX(i) ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
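+/*
+ Plugging small indices into MINSIZE_FOR_TREE_INDEX (with TREEBIN_SHIFT
+ of 8 as defined above) shows how the treebins are spaced:
+ i=0 -> 256, i=1 -> 384, i=2 -> 512, i=3 -> 768, i=4 -> 1024, and so on,
+ i.e. each pair of bins covers one power-of-two size range.
+*/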
+
+
+/* ------------------------ Operations on bin maps ----------------------- */
+/* bit corresponding to given index */
+#define IDX2BIT(i) ((binmap_t)(1) << (i))
+/* Mark/Clear bits with given index */
+#define MARK_SMALLMAP(M,i) ((M)->iSmallMap |= IDX2BIT(i))
+#define CLEAR_SMALLMAP(M,i) ((M)->iSmallMap &= ~IDX2BIT(i))
+#define SMALLMAP_IS_MARKED(M,i) ((M)->iSmallMap & IDX2BIT(i))
+#define MARK_TREEMAP(M,i) ((M)->iTreeMap |= IDX2BIT(i))
+#define CLEAR_TREEMAP(M,i) ((M)->iTreeMap &= ~IDX2BIT(i))
+#define TREEMAP_IS_MARKED(M,i) ((M)->iTreeMap & IDX2BIT(i))
+
+ /* isolate the least set bit of a bitmap */
+#define LEAST_BIT(x) ((x) & -(x))
+
+/* mask with all bits to left of least bit of x on */
+#define LEFT_BITS(x) ((x<<1) | -(x<<1))
+
+/* mask with all bits to left of or equal to least bit of x on */
+#define SAME_OR_LEFT_BITS(x) ((x) | -(x))
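+/*
+ These bit tricks are easiest to see with a concrete value: for
+ x = 0b0010100, LEAST_BIT(x) isolates 0b0000100, LEFT_BITS(x) is the mask
+ of every bit strictly to the left of that least bit (...1111000), and
+ SAME_OR_LEFT_BITS(x) additionally keeps the least bit itself (...1111100).
+ Two's-complement negation of the unsigned value is what makes this work.
+*/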
+
+#if !INSECURE
+ /* Check if address a is at least as high as any from MORECORE or MMAP */
+ #define OK_ADDRESS(M, a) ((TUint8*)(a) >= (M)->iLeastAddr)
+ /* Check if address of next chunk n is higher than base chunk p */
+ #define OK_NEXT(p, n) ((TUint8*)(p) < (TUint8*)(n))
+ /* Check if p has its CINUSE bit on */
+ #define OK_CINUSE(p) CINUSE(p)
+ /* Check if p has its PINUSE bit on */
+ #define OK_PINUSE(p) PINUSE(p)
+#else /* !INSECURE */
+ #define OK_ADDRESS(M, a) (1)
+ #define OK_NEXT(b, n) (1)
+ #define OK_CINUSE(p) (1)
+ #define OK_PINUSE(p) (1)
+#endif /* !INSECURE */
+
+#if (FOOTERS && !INSECURE)
+ /* Check if (alleged) mstate m has expected iMagic field */
+ #define OK_MAGIC(M) ((M)->iMagic == mparams.iMagic)
+#else /* (FOOTERS && !INSECURE) */
+ #define OK_MAGIC(M) (1)
+#endif /* (FOOTERS && !INSECURE) */
+
+/* In gcc, use __builtin_expect to minimize impact of checks */
+#if !INSECURE
+ #if defined(__GNUC__) && __GNUC__ >= 3
+ #define RTCHECK(e) __builtin_expect(e, 1)
+ #else /* GNUC */
+ #define RTCHECK(e) (e)
+ #endif /* GNUC */
+
+#else /* !INSECURE */
+ #define RTCHECK(e) (1)
+#endif /* !INSECURE */
+/* macros to set up inuse chunks with or without footers */
+#if !FOOTERS
+ #define MARK_INUSE_FOOT(M,p,s)
+ /* Set CINUSE bit and PINUSE bit of next chunk */
+ #define SET_INUSE(M,p,s) ((p)->iHead = (((p)->iHead & PINUSE_BIT)|s|CINUSE_BIT),((mchunkptr)(((TUint8*)(p)) + (s)))->iHead |= PINUSE_BIT)
+ /* Set CINUSE and PINUSE of this chunk and PINUSE of next chunk */
+ #define SET_INUSE_AND_PINUSE(M,p,s) ((p)->iHead = (s|PINUSE_BIT|CINUSE_BIT),((mchunkptr)(((TUint8*)(p)) + (s)))->iHead |= PINUSE_BIT)
+ /* Set size, CINUSE and PINUSE bit of this chunk */
+ #define SET_SIZE_AND_PINUSE_OF_INUSE_CHUNK(M, p, s) ((p)->iHead = (s|PINUSE_BIT|CINUSE_BIT))
+#else /* FOOTERS */
+ /* Set foot of inuse chunk to be xor of mstate and seed */
+ #define MARK_INUSE_FOOT(M,p,s) (((mchunkptr)((TUint8*)(p) + (s)))->iPrevFoot = ((size_t)(M) ^ mparams.iMagic))
+ #define GET_MSTATE_FOR(p) ((mstate)(((mchunkptr)((TUint8*)(p)+(CHUNKSIZE(p))))->iPrevFoot ^ mparams.iMagic))
+ #define SET_INUSE(M,p,s)\
+ ((p)->iHead = (((p)->iHead & PINUSE_BIT)|s|CINUSE_BIT),\
+ (((mchunkptr)(((TUint8*)(p)) + (s)))->iHead |= PINUSE_BIT), \
+ MARK_INUSE_FOOT(M,p,s))
+ #define SET_INUSE_AND_PINUSE(M,p,s)\
+ ((p)->iHead = (s|PINUSE_BIT|CINUSE_BIT),\
+ (((mchunkptr)(((TUint8*)(p)) + (s)))->iHead |= PINUSE_BIT),\
+ MARK_INUSE_FOOT(M,p,s))
+ #define SET_SIZE_AND_PINUSE_OF_INUSE_CHUNK(M, p, s)\
+ ((p)->iHead = (s|PINUSE_BIT|CINUSE_BIT),\
+ MARK_INUSE_FOOT(M, p, s))
+#endif /* !FOOTERS */
+
+
+#if ONLY_MSPACES
+#define INTERNAL_MALLOC(m, b) mspace_malloc(m, b)
+#define INTERNAL_FREE(m, mem) mspace_free(m,mem);
+#else /* ONLY_MSPACES */
+ #if MSPACES
+ #define INTERNAL_MALLOC(m, b) (m == GM)? dlmalloc(b) : mspace_malloc(m, b)
+ #define INTERNAL_FREE(m, mem) if (m == GM) dlfree(mem); else mspace_free(m,mem);
+ #else /* MSPACES */
+ #define INTERNAL_MALLOC(m, b) dlmalloc(b)
+ #define INTERNAL_FREE(m, mem) dlfree(mem)
+ #endif /* MSPACES */
+#endif /* ONLY_MSPACES */
+
+ #ifndef NDEBUG
+ #define CHECKING 1
+ #endif
+// #define HYSTERESIS 4
+ #define HYSTERESIS 1
+ #define HYSTERESIS_BYTES (2*PAGESIZE)
+ #define HYSTERESIS_GROW (HYSTERESIS*PAGESIZE)
+
+ #if CHECKING
+ #define CHECK(x) x
+ #else
+ #undef ASSERT
+ #define ASSERT(x) (void)0
+ #define CHECK(x) (void)0
+ #endif
+
+#endif/*__DLA__*/
diff --git a/src/corelib/arch/symbian/heap_hybrid.cpp b/src/corelib/arch/symbian/heap_hybrid.cpp
new file mode 100644
index 0000000000..6e2ef21e93
--- /dev/null
+++ b/src/corelib/arch/symbian/heap_hybrid.cpp
@@ -0,0 +1,3346 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include "qt_hybridheap_symbian_p.h"
+
+#ifdef QT_USE_NEW_SYMBIAN_ALLOCATOR
+
+// If non-zero, this causes iSlabs to be configured only once the chunk size exceeds this level
+#define DELAYED_SLAB_THRESHOLD (64*1024) // 64KB seems about right based on trace data
+#define SLAB_CONFIG 0x3fff // Use all slab sizes 4,8..56 bytes. This is more efficient for large heaps as Qt tends to have
+
+#ifdef _DEBUG
+#define __SIMULATE_ALLOC_FAIL(s) if (CheckForSimulatedAllocFail()) {s}
+#define __ALLOC_DEBUG_HEADER(s) (s += EDebugHdrSize)
+#define __SET_DEBUG_DATA(p,n,c) (((SDebugCell*)(p))->nestingLevel = (n), ((SDebugCell*)(p))->allocCount = (c))
+#define __GET_USER_DATA_BFR(p) ((p!=0) ? (TUint8*)(p) + EDebugHdrSize : NULL)
+#define __GET_DEBUG_DATA_BFR(p) ((p!=0) ? (TUint8*)(p) - EDebugHdrSize : NULL)
+#define __ZAP_CELL(p) memset( (TUint8*)p, 0xde, (AllocLen(__GET_USER_DATA_BFR(p))+EDebugHdrSize))
+#define __DEBUG_SAVE(p) TInt dbgNestLevel = ((SDebugCell*)p)->nestingLevel
+#define __DEBUG_RESTORE(p) if (p) {((SDebugCell*)p)->nestingLevel = dbgNestLevel;}
+#define __DEBUG_HDR_SIZE EDebugHdrSize
+#define __REMOVE_DBG_HDR(n) (n*EDebugHdrSize)
+#define __GET_AVAIL_BLOCK_SIZE(s) ( (s<EDebugHdrSize) ? 0 : s-EDebugHdrSize )
+#define __UPDATE_ALLOC_COUNT(o,n,c) if (o!=n && n) {((SDebugCell*)n)->allocCount = (c);}
+#define __INIT_COUNTERS(i) iCellCount=i,iTotalAllocSize=i
+#define __INCREMENT_COUNTERS(p) iCellCount++, iTotalAllocSize += AllocLen(p)
+#define __DECREMENT_COUNTERS(p) iCellCount--, iTotalAllocSize -= AllocLen(p)
+#define __UPDATE_TOTAL_ALLOC(p,s) iTotalAllocSize += (AllocLen(__GET_USER_DATA_BFR(p)) - s)
+
+#else
+#define __SIMULATE_ALLOC_FAIL(s)
+#define __ALLOC_DEBUG_HEADER(s)
+#define __SET_DEBUG_DATA(p,n,c)
+#define __GET_USER_DATA_BFR(p) (p)
+#define __GET_DEBUG_DATA_BFR(p) (p)
+#define __ZAP_CELL(p)
+#define __DEBUG_SAVE(p)
+#define __DEBUG_RESTORE(p)
+#define __DEBUG_HDR_SIZE 0
+#define __REMOVE_DBG_HDR(n) 0
+#define __GET_AVAIL_BLOCK_SIZE(s) (s)
+#define __UPDATE_ALLOC_COUNT(o,n,c)
+#define __INIT_COUNTERS(i) iCellCount=i,iTotalAllocSize=i
+#define __INCREMENT_COUNTERS(p)
+#define __DECREMENT_COUNTERS(p)
+#define __UPDATE_TOTAL_ALLOC(p,s)
+
+#endif
+
+
+#define MEMORY_MONITORED (iFlags & EMonitorMemory)
+#define GM (&iGlobalMallocState)
+#define IS_FIXED_HEAP (iFlags & EFixedSize)
+#define __INIT_COUNTERS(i) iCellCount=i,iTotalAllocSize=i
+#define __POWER_OF_2(x) (!((x)&((x)-1)))
+
+#define __DL_BFR_CHECK(M,P) \
+ if ( MEMORY_MONITORED ) \
+ if ( !IS_ALIGNED(P) || ((TUint8*)(P)<M->iSeg.iBase) || ((TUint8*)(P)>(M->iSeg.iBase+M->iSeg.iSize))) \
+ BTraceContext12(BTrace::EHeap, BTrace::EHeapCorruption, (TUint32)this, (TUint32)P, (TUint32)0), HEAP_PANIC(ETHeapBadCellAddress); \
+ else DoCheckInuseChunk(M, MEM2CHUNK(P))
+
+#ifndef __KERNEL_MODE__
+
+#define __SLAB_BFR_CHECK(S,P,B) \
+ if ( MEMORY_MONITORED ) \
+ if ( ((TUint32)P & 0x3) || ((TUint8*)P<iMemBase) || ((TUint8*)(P)>(TUint8*)this)) \
+ BTraceContext12(BTrace::EHeap, BTrace::EHeapCorruption, (TUint32)this, (TUint32)P, (TUint32)S), HEAP_PANIC(ETHeapBadCellAddress); \
+ else DoCheckSlab(S, EPartialFullSlab, P), BuildPartialSlabBitmap(B,S,P)
+#define __PAGE_BFR_CHECK(P) \
+ if ( MEMORY_MONITORED ) \
+ if ( ((TUint32)P & ((1 << iPageSize)-1)) || ((TUint8*)P<iMemBase) || ((TUint8*)(P)>(TUint8*)this)) \
+ BTraceContext12(BTrace::EHeap, BTrace::EHeapCorruption, (TUint32)this, (TUint32)P, (TUint32)0), HEAP_PANIC(ETHeapBadCellAddress)
+
+#endif
+
+#ifdef _MSC_VER
+// This is required while we are still using VC6 to compile, so as to avoid warnings that cannot be fixed
+// without having to edit the original Doug Lea source. The 4146 warnings are due to the original code having
+// a liking for negating unsigned numbers and the 4127 warnings are due to the original code using the RTCHECK
+// macro with values that are always defined as 1. It is better to turn these warnings off than to introduce
+// diffs between the original Doug Lea implementation and our adaptation of it
+#pragma warning( disable : 4146 ) /* unary minus operator applied to unsigned type, result still unsigned */
+#pragma warning( disable : 4127 ) /* conditional expression is constant */
+#endif // _MSC_VER
+
+
+/**
+@SYMPatchable
+@publishedPartner
+@released
+
+Defines the minimum cell size of a heap.
+
+The constant can be changed at ROM build time using patchdata OBY keyword.
+
+@deprecated Patching this constant no longer has any effect.
+*/
+#ifdef __X86GCC__ // For X86GCC we don't use the proper data import attribute
+#undef IMPORT_D // since the constants are not really imported. GCC doesn't
+#define IMPORT_D // allow imports from self.
+#endif
+IMPORT_D extern const TInt KHeapMinCellSize;
+
+/**
+@SYMPatchable
+@publishedPartner
+@released
+
+This constant defines the ratio that determines the amount of hysteresis between heap growing and heap
+shrinking.
+It is a 32-bit fixed point number where the radix point is defined to be
+between bits 7 and 8 (where the LSB is bit 0) i.e. using standard notation, a Q8 or a fx24.8
+fixed point number. For example, for a ratio of 2.0, set KHeapShrinkHysRatio=0x200.
+
+The heap shrinking hysteresis value is calculated to be:
+@code
+KHeapShrinkHysRatio*(iGrowBy>>8)
+@endcode
+where iGrowBy is a page aligned value set by the argument, aGrowBy, to the RHeap constructor.
+The default hysteresis value is iGrowBy bytes i.e. KHeapShrinkHysRatio=2.0.
+
+Memory usage may be improved by reducing the heap shrinking hysteresis
+by setting 1.0 < KHeapShrinkHysRatio < 2.0. Heap shrinking hysteresis is disabled/removed
+when KHeapShrinkHysRatio <= 1.0.
+
+The constant can be changed at ROM build time using patchdata OBY keyword.
+*/
+IMPORT_D extern const TInt KHeapShrinkHysRatio;
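+/*
+ A worked example of the formula above, assuming purely for illustration
+ that iGrowBy is one 4 KB page and that the ratio is patched to 2.0
+ (KHeapShrinkHysRatio = 0x200): the shrink hysteresis works out to
+ 0x200 * (4096 >> 8) = 512 * 16 = 8192 bytes, i.e. two pages.
+*/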
+
+UEXPORT_C TInt RHeap::AllocLen(const TAny* aCell) const
+{
+ const MAllocator* m = this;
+ return m->AllocLen(aCell);
+}
+
+UEXPORT_C TAny* RHeap::Alloc(TInt aSize)
+{
+ const MAllocator* m = this;
+ return ((MAllocator*)m)->Alloc(aSize);
+}
+
+UEXPORT_C void RHeap::Free(TAny* aCell)
+{
+ const MAllocator* m = this;
+ ((MAllocator*)m)->Free(aCell);
+}
+
+UEXPORT_C TAny* RHeap::ReAlloc(TAny* aCell, TInt aSize, TInt aMode)
+{
+ const MAllocator* m = this;
+ return ((MAllocator*)m)->ReAlloc(aCell, aSize, aMode);
+}
+
+UEXPORT_C TInt RHeap::DebugFunction(TInt aFunc, TAny* a1, TAny* a2)
+{
+ const MAllocator* m = this;
+ return ((MAllocator*)m)->DebugFunction(aFunc, a1, a2);
+}
+
+UEXPORT_C TInt RHeap::Extension_(TUint aExtensionId, TAny*& a0, TAny* a1)
+{
+ const MAllocator* m = this;
+ return ((MAllocator*)m)->Extension_(aExtensionId, a0, a1);
+}
+
+#ifndef __KERNEL_MODE__
+
+EXPORT_C TInt RHeap::AllocSize(TInt& aTotalAllocSize) const
+{
+ const MAllocator* m = this;
+ return m->AllocSize(aTotalAllocSize);
+}
+
+EXPORT_C TInt RHeap::Available(TInt& aBiggestBlock) const
+{
+ const MAllocator* m = this;
+ return m->Available(aBiggestBlock);
+}
+
+EXPORT_C void RHeap::Reset()
+{
+ const MAllocator* m = this;
+ ((MAllocator*)m)->Reset();
+}
+
+EXPORT_C TInt RHeap::Compress()
+{
+ const MAllocator* m = this;
+ return ((MAllocator*)m)->Compress();
+}
+#endif
+
+RHybridHeap::RHybridHeap()
+ {
+ // This initialisation cannot be done in RHeap() for compatibility reasons
+ iMaxLength = iChunkHandle = iNestingLevel = 0;
+ iTop = NULL;
+ iFailType = ENone;
+ iTestData = NULL;
+ }
+
+void RHybridHeap::operator delete(TAny*, TAny*)
+/**
+Called if constructor issued by operator new(TUint aSize, TAny* aBase) throws exception.
+This is dummy as corresponding new operator does not allocate memory.
+*/
+{}
+
+
+#ifndef __KERNEL_MODE__
+void RHybridHeap::Lock() const
+ /**
+ @internalComponent
+*/
+ {((RFastLock&)iLock).Wait();}
+
+
+void RHybridHeap::Unlock() const
+ /**
+ @internalComponent
+*/
+ {((RFastLock&)iLock).Signal();}
+
+
+TInt RHybridHeap::ChunkHandle() const
+ /**
+ @internalComponent
+*/
+{
+ return iChunkHandle;
+}
+
+#else
+//
+// This method is implemented in kheap.cpp
+//
+//void RHybridHeap::Lock() const
+ /**
+ @internalComponent
+*/
+// {;}
+
+
+
+//
+// This method is implemented in kheap.cpp
+//
+//void RHybridHeap::Unlock() const
+ /**
+ @internalComponent
+*/
+// {;}
+
+
+TInt RHybridHeap::ChunkHandle() const
+ /**
+ @internalComponent
+*/
+{
+ return 0;
+}
+#endif
+
+RHybridHeap::RHybridHeap(TInt aChunkHandle, TInt aOffset, TInt aMinLength, TInt aMaxLength, TInt aGrowBy, TInt aAlign, TBool aSingleThread, TBool aDLOnly, TBool aUseAdjust)
+/**
+Constructor for a non fixed heap. Unlike the fixed heap, this heap is quite flexible in terms of its minimum and
+maximum lengths and in that it can use the hybrid allocator if all of its requirements are met.
+*/
+ : iOffset(aOffset), iChunkSize(aMinLength)
+ {
+ __ASSERT_ALWAYS(iOffset>=0, HEAP_PANIC(ETHeapNewBadOffset));
+
+ iChunkHandle = aChunkHandle;
+ iMinLength = aMinLength;
+ iMaxLength = aMaxLength;
+
+ // If the user has explicitly specified 0 as the aGrowBy value, set it to 1 so that it will be rounded up to the nearest page size
+ if (aGrowBy == 0)
+ aGrowBy = 1;
+ GET_PAGE_SIZE(iPageSize);
+ iGrowBy = _ALIGN_UP(aGrowBy, iPageSize);
+
+ Construct(aSingleThread, aDLOnly, aUseAdjust, aAlign);
+ }
+
+RHybridHeap::RHybridHeap(TInt aMaxLength, TInt aAlign, TBool aSingleThread)
+/**
+Constructor for a fixed heap. We have restrictions in that we have fixed minimum and maximum lengths and cannot grow
+and we only use DL allocator.
+*/
+ : iOffset(0), iChunkSize(aMaxLength)
+ {
+ iChunkHandle = NULL;
+ iMinLength = aMaxLength;
+ iMaxLength = aMaxLength;
+ iGrowBy = 0;
+
+ Construct(aSingleThread, ETrue, ETrue, aAlign);
+ }
+
+TAny* RHybridHeap::operator new(TUint aSize, TAny* aBase) __NO_THROW
+{
+ __ASSERT_ALWAYS(aSize>=sizeof(RHybridHeap), HEAP_PANIC(ETHeapNewBadSize));
+ RHybridHeap* h = (RHybridHeap*)aBase;
+ h->iBase = ((TUint8*)aBase) + aSize;
+ return aBase;
+}
+
+void RHybridHeap::Construct(TBool aSingleThread, TBool aDLOnly, TBool aUseAdjust, TInt aAlign)
+{
+ iAlign = aAlign ? aAlign : RHybridHeap::ECellAlignment;
+ __ASSERT_ALWAYS((TUint32)iAlign>=sizeof(TAny*) && __POWER_OF_2(iAlign), HEAP_PANIC(ETHeapNewBadAlignment));
+
+ // This initialisation cannot be done in RHeap() for compatibility reasons
+ iTop = NULL;
+ iFailType = ENone;
+ iNestingLevel = 0;
+ iTestData = NULL;
+
+ iHighWaterMark = iMinLength;
+ iAllocCount = 0;
+ iFlags = aSingleThread ? ESingleThreaded : 0;
+ iGrowBy = _ALIGN_UP(iGrowBy, iPageSize);
+
+ if ( iMinLength == iMaxLength )
+ {
+ iFlags |= EFixedSize;
+ aDLOnly = ETrue;
+ }
+#ifndef __KERNEL_MODE__
+#ifdef DELAYED_SLAB_THRESHOLD
+ iSlabInitThreshold = DELAYED_SLAB_THRESHOLD;
+#else
+ iSlabInitThreshold = 0;
+#endif // DELAYED_SLAB_THRESHOLD
+ iUseAdjust = aUseAdjust;
+ iDLOnly = aDLOnly;
+#else
+ (void)aUseAdjust;
+#endif
+ // Initialise suballocators
+ // if DL only is required then it cannot allocate slab or page memory
+ // so these sub-allocators should be disabled. Otherwise initialise with default values
+ if ( aDLOnly )
+ {
+ Init(0, 0);
+ }
+ else
+ {
+ Init(SLAB_CONFIG, 16);
+ }
+
+#ifdef ENABLE_BTRACE
+
+ TUint32 traceData[4];
+ traceData[0] = iMinLength;
+ traceData[1] = iMaxLength;
+ traceData[2] = iGrowBy;
+ traceData[3] = iAlign;
+ BTraceContextN(BTrace::ETest1, 90, (TUint32)this, 11, traceData, sizeof(traceData));
+#endif
+
+}
+
+#ifndef __KERNEL_MODE__
+TInt RHybridHeap::ConstructLock(TUint32 aMode)
+{
+ TBool duplicateLock = EFalse;
+ TInt r = KErrNone;
+ if (!(iFlags & ESingleThreaded))
+ {
+ duplicateLock = aMode & UserHeap::EChunkHeapSwitchTo;
+ r = iLock.CreateLocal(duplicateLock ? EOwnerThread : EOwnerProcess);
+ if( r != KErrNone)
+ {
+ iChunkHandle = 0;
+ return r;
+ }
+ }
+
+ if ( aMode & UserHeap::EChunkHeapSwitchTo )
+ User::SwitchHeap(this);
+
+ iHandles = &iChunkHandle;
+ if (!(iFlags & ESingleThreaded))
+ {
+ // now change the thread-relative chunk/semaphore handles into process-relative handles
+ iHandleCount = 2;
+ if(duplicateLock)
+ {
+ RHandleBase s = iLock;
+ r = iLock.Duplicate(RThread());
+ s.Close();
+ }
+ if (r==KErrNone && (aMode & UserHeap::EChunkHeapDuplicate))
+ {
+ r = ((RChunk*)&iChunkHandle)->Duplicate(RThread());
+ if (r!=KErrNone)
+ iLock.Close(), iChunkHandle=0;
+ }
+ }
+ else
+ {
+ iHandleCount = 1;
+ if (aMode & UserHeap::EChunkHeapDuplicate)
+ r = ((RChunk*)&iChunkHandle)->Duplicate(RThread(), EOwnerThread);
+ }
+
+ return r;
+}
+#endif
+
+void RHybridHeap::Init(TInt aBitmapSlab, TInt aPagePower)
+{
+ /*Moved code which does initialization */
+ iTop = (TUint8*)this + iMinLength;
+ iBase = Ceiling(iBase, ECellAlignment); // Align iBase address
+
+ __INIT_COUNTERS(0);
+ // memset(&mparams,0,sizeof(mparams));
+
+ InitDlMalloc(iTop - iBase, 0);
+
+#ifndef __KERNEL_MODE__
+ SlabInit();
+ iSlabConfigBits = aBitmapSlab;
+ if ( iChunkSize > iSlabInitThreshold )
+ {
+ iSlabInitThreshold = KMaxTInt32;
+ SlabConfig(aBitmapSlab); // Delayed slab configuration done
+ }
+ if ( aPagePower )
+ {
+ RChunk chunk;
+ chunk.SetHandle(iChunkHandle);
+ iMemBase = chunk.Base(); // Store base address for paged allocator
+ }
+
+ /*10-1K,11-2K,12-4k,13-8K,14-16K,15-32K,16-64K*/
+ PagedInit(aPagePower);
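+ /*
+ A note on the mapping above: the page power is an exponent, so the value
+ 16 passed from Construct() corresponds to 64 KB (1 << 16); the
+ (aSize >> iPageThreshold) test in Alloc() then sends requests of that
+ size and larger to the paged allocator, assuming the threshold is taken
+ from this power.
+ */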
+
+#ifdef ENABLE_BTRACE
+ TUint32 traceData[3];
+ traceData[0] = aBitmapSlab;
+ traceData[1] = aPagePower;
+ traceData[2] = GM->iTrimCheck;
+ BTraceContextN(BTrace::ETest1, 90, (TUint32)this, 0, traceData, sizeof(traceData));
+#endif
+#else
+ (void)aBitmapSlab;
+ (void)aPagePower;
+#endif // __KERNEL_MODE__
+
+}
+
+
+TInt RHybridHeap::AllocLen(const TAny* aCell) const
+{
+ aCell = __GET_DEBUG_DATA_BFR(aCell);
+
+ if (PtrDiff(aCell, this) >= 0)
+ {
+ mchunkptr m = MEM2CHUNK(aCell);
+ return CHUNKSIZE(m) - OVERHEAD_FOR(m) - __DEBUG_HDR_SIZE;
+ }
+#ifndef __KERNEL_MODE__
+ if ( aCell )
+ {
+ if (LowBits(aCell, iPageSize) )
+ return SlabHeaderSize(slab::SlabFor(aCell)->iHeader) - __DEBUG_HDR_SIZE;
+
+ return PagedSize((void*)aCell) - __DEBUG_HDR_SIZE;
+ }
+#endif
+ return 0; // NULL pointer situation, should PANIC !!
+}
+
+#ifdef __KERNEL_MODE__
+TAny* RHybridHeap::Alloc(TInt aSize)
+{
+ __CHECK_THREAD_STATE;
+ __ASSERT_ALWAYS((TUint)aSize<(KMaxTInt/2),HEAP_PANIC(ETHeapBadAllocatedCellSize));
+ __SIMULATE_ALLOC_FAIL(return NULL;)
+ Lock();
+ __ALLOC_DEBUG_HEADER(aSize);
+ TAny* addr = DlMalloc(aSize);
+ if ( addr )
+ {
+// iCellCount++;
+ __SET_DEBUG_DATA(addr, iNestingLevel, ++iAllocCount);
+ addr = __GET_USER_DATA_BFR(addr);
+ __INCREMENT_COUNTERS(addr);
+ memclr(addr, AllocLen(addr));
+ }
+ Unlock();
+#ifdef ENABLE_BTRACE
+ if (iFlags & ETraceAllocs)
+ {
+ if ( addr )
+ {
+ TUint32 traceData[3];
+ traceData[0] = AllocLen(addr);
+ traceData[1] = aSize - __DEBUG_HDR_SIZE;
+ traceData[2] = 0;
+ BTraceContextN(BTrace::EHeap, BTrace::EHeapAlloc, (TUint32)this, (TUint32)addr, traceData, sizeof(traceData));
+ }
+ else
+ BTraceContext8(BTrace::EHeap, BTrace::EHeapAllocFail, (TUint32)this, (TUint32)(aSize - __DEBUG_HDR_SIZE));
+ }
+#endif
+ return addr;
+}
+#else
+
+TAny* RHybridHeap::Alloc(TInt aSize)
+{
+ __ASSERT_ALWAYS((TUint)aSize<(KMaxTInt/2),HEAP_PANIC(ETHeapBadAllocatedCellSize));
+ __SIMULATE_ALLOC_FAIL(return NULL;)
+
+ TAny* addr;
+#ifdef ENABLE_BTRACE
+ TInt aSubAllocator=0;
+#endif
+
+ Lock();
+
+ __ALLOC_DEBUG_HEADER(aSize);
+
+ if (aSize < iSlabThreshold)
+ {
+ TInt ix = iSizeMap[(aSize+3)>>2];
+ HEAP_ASSERT(ix != 0xff);
+ addr = SlabAllocate(iSlabAlloc[ix]);
+ if ( !addr )
+ { // Slab allocation has failed, try to allocate from DL
+ addr = DlMalloc(aSize);
+ }
+#ifdef ENABLE_BTRACE
+ else
+ aSubAllocator=1;
+#endif
+ }
+ else if ((aSize >> iPageThreshold) == 0)
+ {
+ addr = DlMalloc(aSize);
+ }
+ else
+ {
+ addr = PagedAllocate(aSize);
+ if ( !addr )
+ { // Page allocation has failed, try to allocate from DL
+ addr = DlMalloc(aSize);
+ }
+#ifdef ENABLE_BTRACE
+ else
+ aSubAllocator=2;
+#endif
+ }
+
+ if ( addr )
+ {
+// iCellCount++;
+ __SET_DEBUG_DATA(addr, iNestingLevel, ++iAllocCount);
+ addr = __GET_USER_DATA_BFR(addr);
+ __INCREMENT_COUNTERS(addr);
+ }
+ Unlock();
+
+#ifdef ENABLE_BTRACE
+ if (iFlags & ETraceAllocs)
+ {
+ if ( addr )
+ {
+ TUint32 traceData[3];
+ traceData[0] = AllocLen(addr);
+ traceData[1] = aSize - __DEBUG_HDR_SIZE;
+ traceData[2] = aSubAllocator;
+ BTraceContextN(BTrace::EHeap, BTrace::EHeapAlloc, (TUint32)this, (TUint32)addr, traceData, sizeof(traceData));
+ }
+ else
+ BTraceContext8(BTrace::EHeap, BTrace::EHeapAllocFail, (TUint32)this, (TUint32)(aSize - __DEBUG_HDR_SIZE));
+ }
+#endif
+
+ return addr;
+}
+#endif // __KERNEL_MODE__
+
+#ifndef __KERNEL_MODE__
+TInt RHybridHeap::Compress()
+{
+ if ( IS_FIXED_HEAP )
+ return 0;
+
+ Lock();
+ TInt Reduced = SysTrim(GM, 0);
+ if (iSparePage)
+ {
+ Unmap(iSparePage, iPageSize);
+ iSparePage = 0;
+ Reduced += iPageSize;
+ }
+ Unlock();
+ return Reduced;
+}
+#endif
+
+void RHybridHeap::Free(TAny* aPtr)
+{
+ __CHECK_THREAD_STATE;
+ if ( !aPtr )
+ return;
+#ifdef ENABLE_BTRACE
+ TInt aSubAllocator=0;
+#endif
+ Lock();
+
+ aPtr = __GET_DEBUG_DATA_BFR(aPtr);
+
+#ifndef __KERNEL_MODE__
+ if (PtrDiff(aPtr, this) >= 0)
+ {
+#endif
+ __DL_BFR_CHECK(GM, aPtr);
+ __DECREMENT_COUNTERS(__GET_USER_DATA_BFR(aPtr));
+ __ZAP_CELL(aPtr);
+ DlFree( aPtr);
+#ifndef __KERNEL_MODE__
+ }
+
+ else if ( LowBits(aPtr, iPageSize) == 0 )
+ {
+#ifdef ENABLE_BTRACE
+ aSubAllocator = 2;
+#endif
+ __PAGE_BFR_CHECK(aPtr);
+ __DECREMENT_COUNTERS(__GET_USER_DATA_BFR(aPtr));
+ PagedFree(aPtr);
+ }
+ else
+ {
+#ifdef ENABLE_BTRACE
+ aSubAllocator = 1;
+#endif
+ TUint32 bm[4];
+ __SLAB_BFR_CHECK(slab::SlabFor(aPtr),aPtr,bm);
+ __DECREMENT_COUNTERS(__GET_USER_DATA_BFR(aPtr));
+ __ZAP_CELL(aPtr);
+ SlabFree(aPtr);
+ }
+#endif // __KERNEL_MODE__
+// iCellCount--;
+ Unlock();
+#ifdef ENABLE_BTRACE
+ if (iFlags & ETraceAllocs)
+ {
+ TUint32 traceData;
+ traceData = aSubAllocator;
+ BTraceContextN(BTrace::EHeap, BTrace::EHeapFree, (TUint32)this, (TUint32)__GET_USER_DATA_BFR(aPtr), &traceData, sizeof(traceData));
+ }
+#endif
+}
+
+#ifndef __KERNEL_MODE__
+void RHybridHeap::Reset()
+/**
+Frees all allocated cells on this heap.
+*/
+{
+ Lock();
+ if ( !IS_FIXED_HEAP )
+ {
+ if ( GM->iSeg.iSize > (iMinLength - sizeof(*this)) )
+ Unmap(GM->iSeg.iBase + (iMinLength - sizeof(*this)), (GM->iSeg.iSize - (iMinLength - sizeof(*this))));
+ ResetBitmap();
+ if ( !iDLOnly )
+ Init(iSlabConfigBits, iPageThreshold);
+ else
+ Init(0,0);
+ }
+ else Init(0,0);
+ Unlock();
+}
+#endif
+
+TAny* RHybridHeap::ReAllocImpl(TAny* aPtr, TInt aSize, TInt aMode)
+{
+ // First handle special case of calling reallocate with NULL aPtr
+ if (!aPtr)
+ {
+ if (( aMode & ENeverMove ) == 0 )
+ {
+ aPtr = Alloc(aSize - __DEBUG_HDR_SIZE);
+ aPtr = __GET_DEBUG_DATA_BFR(aPtr);
+ }
+ return aPtr;
+ }
+
+ TInt oldsize = AllocLen(__GET_USER_DATA_BFR(aPtr)) + __DEBUG_HDR_SIZE;
+
+ // Insist on geometric growth when reallocating memory; this reduces copying and fragmentation
+ // generated during arithmetic growth of buffer/array/vector memory
+ // Experiments have shown that 25% is a good threshold for this policy
+ if (aSize <= oldsize)
+ {
+ if (aSize >= oldsize - (oldsize>>2))
+ return aPtr; // don't change if >75% original size
+ }
+ else
+ {
+ __SIMULATE_ALLOC_FAIL(return NULL;)
+ if (aSize < oldsize + (oldsize>>2))
+ {
+ aSize = _ALIGN_UP(oldsize + (oldsize>>2), 4); // grow to at least 125% original size
+ }
+ }
+ __DEBUG_SAVE(aPtr);
+
+ TAny* newp;
+#ifdef __KERNEL_MODE__
+ Lock();
+ __DL_BFR_CHECK(GM, aPtr);
+ newp = DlRealloc(aPtr, aSize, aMode);
+ Unlock();
+ if ( newp )
+ {
+ if ( aSize > oldsize )
+ memclr(((TUint8*)newp) + oldsize, (aSize-oldsize)); // Buffer has grown in place, clear extra
+ __DEBUG_RESTORE(newp);
+ __UPDATE_ALLOC_COUNT(aPtr, newp, ++iAllocCount);
+ __UPDATE_TOTAL_ALLOC(newp, oldsize);
+ }
+#else
+ // Decide how to reallocate based on (a) the current cell location, (b) the mode requested and (c) the new size
+ if ( PtrDiff(aPtr, this) >= 0 )
+ { // current cell in Doug Lea iArena
+ if ( (aMode & ENeverMove)
+ ||
+ (!(aMode & EAllowMoveOnShrink) && (aSize < oldsize))
+ ||
+ ((aSize >= iSlabThreshold) && ((aSize >> iPageThreshold) == 0)) )
+ {
+ Lock();
+ __DL_BFR_CHECK(GM, aPtr);
+ newp = DlRealloc(aPtr, aSize, aMode); // old and new in DL allocator
+ Unlock();
+ __DEBUG_RESTORE(newp);
+ __UPDATE_ALLOC_COUNT(aPtr,newp, ++iAllocCount);
+ __UPDATE_TOTAL_ALLOC(newp, oldsize);
+ return newp;
+ }
+ }
+ else if (LowBits(aPtr, iPageSize) == 0)
+ { // current cell in paged iArena
+ if ( (aMode & ENeverMove)
+ ||
+ (!(aMode & EAllowMoveOnShrink) && (aSize < oldsize))
+ ||
+ ((aSize >> iPageThreshold) != 0) )
+ {
+ Lock();
+ __PAGE_BFR_CHECK(aPtr);
+ newp = PagedReallocate(aPtr, aSize, aMode); // old and new in paged allocator
+ Unlock();
+ __DEBUG_RESTORE(newp);
+ __UPDATE_ALLOC_COUNT(aPtr,newp, ++iAllocCount);
+ __UPDATE_TOTAL_ALLOC(newp, oldsize);
+ return newp;
+ }
+ }
+ else
+ { // current cell in slab iArena
+ TUint32 bm[4];
+ Lock();
+ __SLAB_BFR_CHECK(slab::SlabFor(aPtr), aPtr, bm);
+ Unlock();
+ if ( aSize <= oldsize)
+ return aPtr;
+ if (aMode & ENeverMove)
+ return NULL; // cannot grow in slab iArena
+ // just use alloc/copy/free...
+ }
+
+ // fallback to allocate and copy
+ // shouldn't get here if we cannot move the cell
+ // __ASSERT(mode == emobile || (mode==efixshrink && size>oldsize));
+
+ newp = Alloc(aSize - __DEBUG_HDR_SIZE);
+ newp = __GET_DEBUG_DATA_BFR(newp);
+ if (newp)
+ {
+ memcpy(newp, aPtr, oldsize<aSize ? oldsize : aSize);
+ __DEBUG_RESTORE(newp);
+ Free(__GET_USER_DATA_BFR(aPtr));
+ }
+
+#endif // __KERNEL_MODE__
+ return newp;
+}
+
+
+TAny* RHybridHeap::ReAlloc(TAny* aPtr, TInt aSize, TInt aMode )
+{
+
+ aPtr = __GET_DEBUG_DATA_BFR(aPtr);
+ __ALLOC_DEBUG_HEADER(aSize);
+
+ TAny* retval = ReAllocImpl(aPtr, aSize, aMode);
+
+ retval = __GET_USER_DATA_BFR(retval);
+
+#ifdef ENABLE_BTRACE
+ if (iFlags & ETraceAllocs)
+ {
+ if ( retval )
+ {
+ TUint32 traceData[3];
+ traceData[0] = AllocLen(retval);
+ traceData[1] = aSize - __DEBUG_HDR_SIZE;
+ traceData[2] = (TUint32)aPtr;
+ BTraceContextN(BTrace::EHeap, BTrace::EHeapReAlloc,(TUint32)this, (TUint32)retval, traceData, sizeof(traceData));
+ }
+ else
+ BTraceContext12(BTrace::EHeap, BTrace::EHeapReAllocFail, (TUint32)this, (TUint32)aPtr, (TUint32)(aSize - __DEBUG_HDR_SIZE));
+ }
+#endif
+ return retval;
+}
+
+#ifndef __KERNEL_MODE__
+TInt RHybridHeap::Available(TInt& aBiggestBlock) const
+/**
+Gets the total free space currently available on the heap and the space
+available in the largest free block.
+
+Note that this function exists mainly for compatibility reasons. In a modern
+heap implementation such as that present in Symbian it is not appropriate to
+concern oneself with details such as the amount of free memory available on a
+heap and its largest free block, because the way that a modern heap implementation
+works is not simple. The amount of available virtual memory is not the same as the
+amount of physical memory, and there are multiple allocation strategies used
+internally, which makes all memory usage figures "fuzzy" at best.
+
+In short, if you want to see if there is enough memory available to allocate a
+block of memory, call Alloc() and if it succeeds then there is enough memory!
+Messing around with functions like this is somewhat pointless with modern heap
+allocators.
+
+@param aBiggestBlock On return, contains the space available in the largest
+ free block on the heap. Due to the internals of modern
+ heap implementations, you can probably still allocate a
+ block larger than this!
+
+@return The total free space currently available on the heap. Again, you can
+ probably still allocate more than this!
+*/
+{
+ struct HeapInfo info;
+ Lock();
+ TInt Biggest = GetInfo(&info);
+ aBiggestBlock = __GET_AVAIL_BLOCK_SIZE(Biggest);
+ Unlock();
+ return __GET_AVAIL_BLOCK_SIZE(info.iFreeBytes);
+
+}
+
+TInt RHybridHeap::AllocSize(TInt& aTotalAllocSize) const
+ /**
+ Gets the number of cells allocated on this heap, and the total space
+ allocated to them.
+
+ @param aTotalAllocSize On return, contains the total space allocated
+ to the cells.
+
+ @return The number of cells allocated on this heap.
+*/
+{
+ struct HeapInfo info;
+ Lock();
+ GetInfo(&info);
+ aTotalAllocSize = info.iAllocBytes - __REMOVE_DBG_HDR(info.iAllocN);
+ Unlock();
+ return info.iAllocN;
+}
+
+#endif
+
+TInt RHybridHeap::Extension_(TUint /* aExtensionId */, TAny*& /* a0 */, TAny* /* a1 */)
+{
+ return KErrNotSupported;
+}
+
+
+
+///////////////////////////////////////////////////////////////////////////////
+// imported from dla.cpp
+///////////////////////////////////////////////////////////////////////////////
+
+//#include <unistd.h>
+//#define DEBUG_REALLOC
+#ifdef DEBUG_REALLOC
+#include <e32debug.h>
+#endif
+
+inline void RHybridHeap::InitBins(mstate m)
+{
+ /* Establish circular links for iSmallBins */
+ bindex_t i;
+ for (i = 0; i < NSMALLBINS; ++i) {
+ sbinptr bin = SMALLBIN_AT(m,i);
+ bin->iFd = bin->iBk = bin;
+ }
+ }
+/* ---------------------------- malloc support --------------------------- */
+
+/* allocate a large request from the best fitting chunk in a treebin */
+void* RHybridHeap::TmallocLarge(mstate m, size_t nb) {
+ tchunkptr v = 0;
+ size_t rsize = -nb; /* Unsigned negation */
+ tchunkptr t;
+ bindex_t idx;
+ ComputeTreeIndex(nb, idx);
+
+ if ((t = *TREEBIN_AT(m, idx)) != 0)
+ {
+ /* Traverse tree for this bin looking for node with size == nb */
+ size_t sizebits = nb << LEFTSHIFT_FOR_TREE_INDEX(idx);
+ tchunkptr rst = 0; /* The deepest untaken right subtree */
+ for (;;)
+ {
+ tchunkptr rt;
+ size_t trem = CHUNKSIZE(t) - nb;
+ if (trem < rsize)
+ {
+ v = t;
+ if ((rsize = trem) == 0)
+ break;
+ }
+ rt = t->iChild[1];
+ t = t->iChild[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
+ if (rt != 0 && rt != t)
+ rst = rt;
+ if (t == 0)
+ {
+ t = rst; /* set t to least subtree holding sizes > nb */
+ break;
+ }
+ sizebits <<= 1;
+ }
+ }
+ if (t == 0 && v == 0)
+ { /* set t to root of next non-empty treebin */
+ binmap_t leftbits = LEFT_BITS(IDX2BIT(idx)) & m->iTreeMap;
+ if (leftbits != 0)
+ {
+ bindex_t i;
+ binmap_t leastbit = LEAST_BIT(leftbits);
+ ComputeBit2idx(leastbit, i);
+ t = *TREEBIN_AT(m, i);
+ }
+ }
+ while (t != 0)
+ { /* Find smallest of tree or subtree */
+ size_t trem = CHUNKSIZE(t) - nb;
+ if (trem < rsize) {
+ rsize = trem;
+ v = t;
+ }
+ t = LEFTMOST_CHILD(t);
+ }
+ /* If iDv is a better fit, return 0 so malloc will use it */
+ if (v != 0 && rsize < (size_t)(m->iDvSize - nb))
+ {
+ if (RTCHECK(OK_ADDRESS(m, v)))
+ { /* split */
+ mchunkptr r = CHUNK_PLUS_OFFSET(v, nb);
+ HEAP_ASSERT(CHUNKSIZE(v) == rsize + nb);
+ if (RTCHECK(OK_NEXT(v, r)))
+ {
+ UnlinkLargeChunk(m, v);
+ if (rsize < MIN_CHUNK_SIZE)
+ SET_INUSE_AND_PINUSE(m, v, (rsize + nb));
+ else
+ {
+ SET_SIZE_AND_PINUSE_OF_INUSE_CHUNK(m, v, nb);
+ SET_SIZE_AND_PINUSE_OF_FREE_CHUNK(r, rsize);
+ InsertChunk(m, r, rsize);
+ }
+ return CHUNK2MEM(v);
+ }
+ }
+ // CORRUPTION_ERROR_ACTION(m);
+ }
+ return 0;
+ }
+
+/* allocate a small request from the best fitting chunk in a treebin */
+void* RHybridHeap::TmallocSmall(mstate m, size_t nb)
+{
+ tchunkptr t, v;
+ size_t rsize;
+ bindex_t i;
+ binmap_t leastbit = LEAST_BIT(m->iTreeMap);
+ ComputeBit2idx(leastbit, i);
+
+ v = t = *TREEBIN_AT(m, i);
+ rsize = CHUNKSIZE(t) - nb;
+
+ while ((t = LEFTMOST_CHILD(t)) != 0)
+ {
+ size_t trem = CHUNKSIZE(t) - nb;
+ if (trem < rsize)
+ {
+ rsize = trem;
+ v = t;
+ }
+ }
+
+ if (RTCHECK(OK_ADDRESS(m, v)))
+ {
+ mchunkptr r = CHUNK_PLUS_OFFSET(v, nb);
+ HEAP_ASSERT(CHUNKSIZE(v) == rsize + nb);
+ if (RTCHECK(OK_NEXT(v, r)))
+ {
+ UnlinkLargeChunk(m, v);
+ if (rsize < MIN_CHUNK_SIZE)
+ SET_INUSE_AND_PINUSE(m, v, (rsize + nb));
+ else
+ {
+ SET_SIZE_AND_PINUSE_OF_INUSE_CHUNK(m, v, nb);
+ SET_SIZE_AND_PINUSE_OF_FREE_CHUNK(r, rsize);
+ ReplaceDv(m, r, rsize);
+ }
+ return CHUNK2MEM(v);
+ }
+ }
+ // CORRUPTION_ERROR_ACTION(m);
+ // return 0;
+ }
+
+inline void RHybridHeap::InitTop(mstate m, mchunkptr p, size_t psize)
+{
+ /* Ensure alignment */
+ size_t offset = ALIGN_OFFSET(CHUNK2MEM(p));
+ p = (mchunkptr)((TUint8*)p + offset);
+ psize -= offset;
+ m->iTop = p;
+ m->iTopSize = psize;
+ p->iHead = psize | PINUSE_BIT;
+ /* set size of fake trailing chunk holding overhead space only once */
+ mchunkptr chunkPlusOff = CHUNK_PLUS_OFFSET(p, psize);
+ chunkPlusOff->iHead = TOP_FOOT_SIZE;
+ m->iTrimCheck = KHeapShrinkHysRatio*(iGrowBy>>8);
+}
+
+
+/* Unlink the first chunk from a smallbin */
+inline void RHybridHeap::UnlinkFirstSmallChunk(mstate M,mchunkptr B,mchunkptr P,bindex_t& I)
+{
+ mchunkptr F = P->iFd;
+ HEAP_ASSERT(P != B);
+ HEAP_ASSERT(P != F);
+ HEAP_ASSERT(CHUNKSIZE(P) == SMALL_INDEX2SIZE(I));
+ if (B == F)
+ CLEAR_SMALLMAP(M, I);
+ else if (RTCHECK(OK_ADDRESS(M, F)))
+ {
+ B->iFd = F;
+ F->iBk = B;
+ }
+ else
+ {
+ CORRUPTION_ERROR_ACTION(M);
+ }
+}
+/* Link a free chunk into a smallbin */
+inline void RHybridHeap::InsertSmallChunk(mstate M,mchunkptr P, size_t S)
+{
+ bindex_t I = SMALL_INDEX(S);
+ mchunkptr B = SMALLBIN_AT(M, I);
+ mchunkptr F = B;
+ HEAP_ASSERT(S >= MIN_CHUNK_SIZE);
+ if (!SMALLMAP_IS_MARKED(M, I))
+ MARK_SMALLMAP(M, I);
+ else if (RTCHECK(OK_ADDRESS(M, B->iFd)))
+ F = B->iFd;
+ else
+ {
+ CORRUPTION_ERROR_ACTION(M);
+ }
+ B->iFd = P;
+ F->iBk = P;
+ P->iFd = F;
+ P->iBk = B;
+}
+
+
+inline void RHybridHeap::InsertChunk(mstate M,mchunkptr P,size_t S)
+{
+ if (IS_SMALL(S))
+ InsertSmallChunk(M, P, S);
+ else
+ {
+ tchunkptr TP = (tchunkptr)(P); InsertLargeChunk(M, TP, S);
+ }
+}
+
+inline void RHybridHeap::UnlinkLargeChunk(mstate M,tchunkptr X)
+{
+ tchunkptr XP = X->iParent;
+ tchunkptr R;
+ if (X->iBk != X)
+ {
+ tchunkptr F = X->iFd;
+ R = X->iBk;
+ if (RTCHECK(OK_ADDRESS(M, F)))
+ {
+ F->iBk = R;
+ R->iFd = F;
+ }
+ else
+ {
+ CORRUPTION_ERROR_ACTION(M);
+ }
+ }
+ else
+ {
+ tchunkptr* RP;
+ if (((R = *(RP = &(X->iChild[1]))) != 0) ||
+ ((R = *(RP = &(X->iChild[0]))) != 0))
+ {
+ tchunkptr* CP;
+ while ((*(CP = &(R->iChild[1])) != 0) ||
+ (*(CP = &(R->iChild[0])) != 0))
+ {
+ R = *(RP = CP);
+ }
+ if (RTCHECK(OK_ADDRESS(M, RP)))
+ *RP = 0;
+ else
+ {
+ CORRUPTION_ERROR_ACTION(M);
+ }
+ }
+ }
+ if (XP != 0)
+ {
+ tbinptr* H = TREEBIN_AT(M, X->iIndex);
+ if (X == *H)
+ {
+ if ((*H = R) == 0)
+ CLEAR_TREEMAP(M, X->iIndex);
+ }
+ else if (RTCHECK(OK_ADDRESS(M, XP)))
+ {
+ if (XP->iChild[0] == X)
+ XP->iChild[0] = R;
+ else
+ XP->iChild[1] = R;
+ }
+ else
+ CORRUPTION_ERROR_ACTION(M);
+ if (R != 0)
+ {
+ if (RTCHECK(OK_ADDRESS(M, R)))
+ {
+ tchunkptr C0, C1;
+ R->iParent = XP;
+ if ((C0 = X->iChild[0]) != 0)
+ {
+ if (RTCHECK(OK_ADDRESS(M, C0)))
+ {
+ R->iChild[0] = C0;
+ C0->iParent = R;
+ }
+ else
+ CORRUPTION_ERROR_ACTION(M);
+ }
+ if ((C1 = X->iChild[1]) != 0)
+ {
+ if (RTCHECK(OK_ADDRESS(M, C1)))
+ {
+ R->iChild[1] = C1;
+ C1->iParent = R;
+ }
+ else
+ CORRUPTION_ERROR_ACTION(M);
+ }
+ }
+ else
+ CORRUPTION_ERROR_ACTION(M);
+ }
+ }
+}
+
+/* Unlink a chunk from a smallbin */
+inline void RHybridHeap::UnlinkSmallChunk(mstate M, mchunkptr P,size_t S)
+{
+ mchunkptr F = P->iFd;
+ mchunkptr B = P->iBk;
+ bindex_t I = SMALL_INDEX(S);
+ HEAP_ASSERT(P != B);
+ HEAP_ASSERT(P != F);
+ HEAP_ASSERT(CHUNKSIZE(P) == SMALL_INDEX2SIZE(I));
+ if (F == B)
+ CLEAR_SMALLMAP(M, I);
+ else if (RTCHECK((F == SMALLBIN_AT(M,I) || OK_ADDRESS(M, F)) &&
+ (B == SMALLBIN_AT(M,I) || OK_ADDRESS(M, B))))
+ {
+ F->iBk = B;
+ B->iFd = F;
+ }
+ else
+ {
+ CORRUPTION_ERROR_ACTION(M);
+ }
+}
+
+inline void RHybridHeap::UnlinkChunk(mstate M, mchunkptr P, size_t S)
+{
+ if (IS_SMALL(S))
+ UnlinkSmallChunk(M, P, S);
+ else
+ {
+ tchunkptr TP = (tchunkptr)(P); UnlinkLargeChunk(M, TP);
+ }
+}
+
+// For DL debug functions
+void RHybridHeap::DoComputeTreeIndex(size_t S, bindex_t& I)
+{
+ ComputeTreeIndex(S, I);
+}
+
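+// ComputeTreeIndex maps a chunk size S to one of the NTREEBINS tree bins (two bins per
+// power-of-two size range). The branch-free bit tricks below amount to:
+//   K = floor(log2(S >> TREEBIN_SHIFT)), I = 2*K plus the next-most-significant bit of S.
+// For example, assuming TREEBIN_SHIFT == 8 as in stock dlmalloc, S = 1536 gives K = 2 and I = 5.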
+inline void RHybridHeap::ComputeTreeIndex(size_t S, bindex_t& I)
+{
+ size_t X = S >> TREEBIN_SHIFT;
+ if (X == 0)
+ I = 0;
+ else if (X > 0xFFFF)
+ I = NTREEBINS-1;
+ else
+ {
+ unsigned int Y = (unsigned int)X;
+ unsigned int N = ((Y - 0x100) >> 16) & 8;
+ unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;
+ N += K;
+ N += K = (((Y <<= K) - 0x4000) >> 16) & 2;
+ K = 14 - N + ((Y <<= K) >> 15);
+ I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));
+ }
+}
+
+/* ------------------------- Operations on trees ------------------------- */
+
+/* Insert chunk into tree */
+inline void RHybridHeap::InsertLargeChunk(mstate M,tchunkptr X,size_t S)
+{
+ tbinptr* H;
+ bindex_t I;
+ ComputeTreeIndex(S, I);
+ H = TREEBIN_AT(M, I);
+ X->iIndex = I;
+ X->iChild[0] = X->iChild[1] = 0;
+ if (!TREEMAP_IS_MARKED(M, I))
+ {
+ MARK_TREEMAP(M, I);
+ *H = X;
+ X->iParent = (tchunkptr)H;
+ X->iFd = X->iBk = X;
+ }
+ else
+ {
+ tchunkptr T = *H;
+ size_t K = S << LEFTSHIFT_FOR_TREE_INDEX(I);
+ for (;;)
+ {
+ if (CHUNKSIZE(T) != S) {
+ tchunkptr* C = &(T->iChild[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);
+ K <<= 1;
+ if (*C != 0)
+ T = *C;
+ else if (RTCHECK(OK_ADDRESS(M, C)))
+ {
+ *C = X;
+ X->iParent = T;
+ X->iFd = X->iBk = X;
+ break;
+ }
+ else
+ {
+ CORRUPTION_ERROR_ACTION(M);
+ break;
+ }
+ }
+ else
+ {
+ tchunkptr F = T->iFd;
+ if (RTCHECK(OK_ADDRESS(M, T) && OK_ADDRESS(M, F)))
+ {
+ T->iFd = F->iBk = X;
+ X->iFd = F;
+ X->iBk = T;
+ X->iParent = 0;
+ break;
+ }
+ else
+ {
+ CORRUPTION_ERROR_ACTION(M);
+ break;
+ }
+ }
+ }
+ }
+}
+
+/*
+Unlink steps:
+
+1. If x is a chained node, unlink it from its same-sized iFd/iBk links
+and choose its iBk node as its replacement.
+2. If x was the last node of its size, but not a leaf node, it must
+be replaced with a leaf node (not merely one with an open left or
+right), to make sure that lefts and rights of descendants
+correspond properly to bit masks. We use the rightmost descendent
+of x. We could use any other leaf, but this is easy to locate and
+tends to counteract removal of leftmosts elsewhere, and so keeps
+paths shorter than minimally guaranteed. This doesn't loop much
+because on average a node in a tree is near the bottom.
+3. If x is the base of a chain (i.e., has iParent links) relink
+x's iParent and children to x's replacement (or null if none).
+*/
+
+/* Replace iDv node, binning the old one */
+/* Used only when iDvSize known to be small */
+inline void RHybridHeap::ReplaceDv(mstate M, mchunkptr P, size_t S)
+{
+ size_t DVS = M->iDvSize;
+ if (DVS != 0)
+ {
+ mchunkptr DV = M->iDv;
+ HEAP_ASSERT(IS_SMALL(DVS));
+ InsertSmallChunk(M, DV, DVS);
+ }
+ M->iDvSize = S;
+ M->iDv = P;
+}
+
+
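+// ComputeBit2idx converts a bitmap value X with exactly one bit set (as produced by
+// LEAST_BIT) into the index I of that bit, i.e. I = log2(X). For example, X = 0x20 gives I = 5.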
+inline void RHybridHeap::ComputeBit2idx(binmap_t X,bindex_t& I)
+{
+ unsigned int Y = X - 1;
+ unsigned int K = Y >> (16-4) & 16;
+ unsigned int N = K; Y >>= K;
+ N += K = Y >> (8-3) & 8; Y >>= K;
+ N += K = Y >> (4-2) & 4; Y >>= K;
+ N += K = Y >> (2-1) & 2; Y >>= K;
+ N += K = Y >> (1-0) & 1; Y >>= K;
+ I = (bindex_t)(N + Y);
+}
+
+
+
+int RHybridHeap::SysTrim(mstate m, size_t pad)
+{
+ size_t extra = 0;
+
+ if ( IS_INITIALIZED(m) )
+ {
+ pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
+
+ if (m->iTopSize > pad)
+ {
+ extra = Floor(m->iTopSize - pad, iPageSize);
+ if ( (m->iSeg.iSize - extra) < (iMinLength - sizeof(*this)) )
+ {
+ if ( m->iSeg.iSize > (iMinLength - sizeof(*this)) )
+ extra = Floor(m->iSeg.iSize - (iMinLength - sizeof(*this)), iPageSize); /* do not shrink heap below min length */
+ else extra = 0;
+ }
+
+ if ( extra )
+ {
+ Unmap(m->iSeg.iBase + m->iSeg.iSize - extra, extra);
+
+ m->iSeg.iSize -= extra;
+ InitTop(m, m->iTop, m->iTopSize - extra);
+ CHECK_TOP_CHUNK(m, m->iTop);
+ }
+ }
+
+ }
+
+ return extra;
+}
+
+/* Get memory from system using MORECORE */
+
+void* RHybridHeap::SysAlloc(mstate m, size_t nb)
+{
+ HEAP_ASSERT(m->iTop);
+ /* Subtract out existing available iTop space from MORECORE request. */
+// size_t asize = _ALIGN_UP(nb - m->iTopSize + TOP_FOOT_SIZE + SIZE_T_ONE, iGrowBy);
+ TInt asize = _ALIGN_UP(nb - m->iTopSize + SYS_ALLOC_PADDING, iGrowBy); // From DLA version 2.8.4
+
+ char* br = (char*)Map(m->iSeg.iBase+m->iSeg.iSize, asize);
+ if (!br)
+ return 0;
+ HEAP_ASSERT(br == (char*)m->iSeg.iBase+m->iSeg.iSize);
+
+ /* Merge with an existing segment */
+ m->iSeg.iSize += asize;
+ InitTop(m, m->iTop, m->iTopSize + asize);
+
+ if (nb < m->iTopSize)
+ { /* Allocate from new or extended iTop space */
+ size_t rsize = m->iTopSize -= nb;
+ mchunkptr p = m->iTop;
+ mchunkptr r = m->iTop = CHUNK_PLUS_OFFSET(p, nb);
+ r->iHead = rsize | PINUSE_BIT;
+ SET_SIZE_AND_PINUSE_OF_INUSE_CHUNK(m, p, nb);
+ CHECK_TOP_CHUNK(m, m->iTop);
+ CHECK_MALLOCED_CHUNK(m, CHUNK2MEM(p), nb);
+ return CHUNK2MEM(p);
+ }
+
+ return 0;
+}
+
+
+void RHybridHeap::InitDlMalloc(size_t capacity, int /*locked*/)
+{
+ memset(GM,0,sizeof(malloc_state));
+   // The maximum amount that can be allocated can be calculated as:
+   // 2^(8*sizeof(size_t)) - sizeof(malloc_state) - TOP_FOOT_SIZE - page size (all suitably padded)
+ // If the capacity exceeds this, no allocation will be done.
+ GM->iSeg.iBase = iBase;
+ GM->iSeg.iSize = capacity;
+ InitBins(GM);
+ InitTop(GM, (mchunkptr)iBase, capacity - TOP_FOOT_SIZE);
+}
+
+void* RHybridHeap::DlMalloc(size_t bytes)
+{
+ /*
+ Basic algorithm:
+ If a small request (< 256 bytes minus per-chunk overhead):
+ 1. If one exists, use a remainderless chunk in associated smallbin.
+ (Remainderless means that there are too few excess bytes to
+ represent as a chunk.)
+ 2. If it is big enough, use the iDv chunk, which is normally the
+ chunk adjacent to the one used for the most recent small request.
+ 3. If one exists, split the smallest available chunk in a bin,
+ saving remainder in iDv.
+ 4. If it is big enough, use the iTop chunk.
+ 5. If available, get memory from system and use it
+ Otherwise, for a large request:
+ 1. Find the smallest available binned chunk that fits, and use it
+ if it is better fitting than iDv chunk, splitting if necessary.
+ 2. If better fitting than any binned chunk, use the iDv chunk.
+ 3. If it is big enough, use the iTop chunk.
+ 4. If request size >= mmap threshold, try to directly mmap this chunk.
+ 5. If available, get memory from system and use it
+*/
+ void* mem;
+ size_t nb;
+ if (bytes <= MAX_SMALL_REQUEST)
+ {
+ bindex_t idx;
+ binmap_t smallbits;
+ nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : PAD_REQUEST(bytes);
+ idx = SMALL_INDEX(nb);
+ smallbits = GM->iSmallMap >> idx;
+
+ if ((smallbits & 0x3U) != 0)
+ { /* Remainderless fit to a smallbin. */
+ mchunkptr b, p;
+ idx += ~smallbits & 1; /* Uses next bin if idx empty */
+ b = SMALLBIN_AT(GM, idx);
+ p = b->iFd;
+ HEAP_ASSERT(CHUNKSIZE(p) == SMALL_INDEX2SIZE(idx));
+ UnlinkFirstSmallChunk(GM, b, p, idx);
+ SET_INUSE_AND_PINUSE(GM, p, SMALL_INDEX2SIZE(idx));
+ mem = CHUNK2MEM(p);
+ CHECK_MALLOCED_CHUNK(GM, mem, nb);
+ return mem;
+ }
+
+ else if (nb > GM->iDvSize)
+ {
+ if (smallbits != 0)
+ { /* Use chunk in next nonempty smallbin */
+ mchunkptr b, p, r;
+ size_t rsize;
+ bindex_t i;
+ binmap_t leftbits = (smallbits << idx) & LEFT_BITS(IDX2BIT(idx));
+ binmap_t leastbit = LEAST_BIT(leftbits);
+ ComputeBit2idx(leastbit, i);
+ b = SMALLBIN_AT(GM, i);
+ p = b->iFd;
+ HEAP_ASSERT(CHUNKSIZE(p) == SMALL_INDEX2SIZE(i));
+ UnlinkFirstSmallChunk(GM, b, p, i);
+ rsize = SMALL_INDEX2SIZE(i) - nb;
+ /* Fit here cannot be remainderless if 4byte sizes */
+ if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
+ SET_INUSE_AND_PINUSE(GM, p, SMALL_INDEX2SIZE(i));
+ else
+ {
+ SET_SIZE_AND_PINUSE_OF_INUSE_CHUNK(GM, p, nb);
+ r = CHUNK_PLUS_OFFSET(p, nb);
+ SET_SIZE_AND_PINUSE_OF_FREE_CHUNK(r, rsize);
+ ReplaceDv(GM, r, rsize);
+ }
+ mem = CHUNK2MEM(p);
+ CHECK_MALLOCED_CHUNK(GM, mem, nb);
+ return mem;
+ }
+
+ else if (GM->iTreeMap != 0 && (mem = TmallocSmall(GM, nb)) != 0)
+ {
+ CHECK_MALLOCED_CHUNK(GM, mem, nb);
+ return mem;
+ }
+ }
+ }
+ else if (bytes >= MAX_REQUEST)
+ nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
+ else
+ {
+ nb = PAD_REQUEST(bytes);
+ if (GM->iTreeMap != 0 && (mem = TmallocLarge(GM, nb)) != 0)
+ {
+ CHECK_MALLOCED_CHUNK(GM, mem, nb);
+ return mem;
+ }
+ }
+
+ if (nb <= GM->iDvSize)
+ {
+ size_t rsize = GM->iDvSize - nb;
+ mchunkptr p = GM->iDv;
+ if (rsize >= MIN_CHUNK_SIZE)
+ { /* split iDv */
+ mchunkptr r = GM->iDv = CHUNK_PLUS_OFFSET(p, nb);
+ GM->iDvSize = rsize;
+ SET_SIZE_AND_PINUSE_OF_FREE_CHUNK(r, rsize);
+ SET_SIZE_AND_PINUSE_OF_INUSE_CHUNK(GM, p, nb);
+ }
+ else
+ { /* exhaust iDv */
+ size_t dvs = GM->iDvSize;
+ GM->iDvSize = 0;
+ GM->iDv = 0;
+ SET_INUSE_AND_PINUSE(GM, p, dvs);
+ }
+ mem = CHUNK2MEM(p);
+ CHECK_MALLOCED_CHUNK(GM, mem, nb);
+ return mem;
+ }
+
+ else if (nb < GM->iTopSize)
+ { /* Split iTop */
+ size_t rsize = GM->iTopSize -= nb;
+ mchunkptr p = GM->iTop;
+ mchunkptr r = GM->iTop = CHUNK_PLUS_OFFSET(p, nb);
+ r->iHead = rsize | PINUSE_BIT;
+ SET_SIZE_AND_PINUSE_OF_INUSE_CHUNK(GM, p, nb);
+ mem = CHUNK2MEM(p);
+ CHECK_TOP_CHUNK(GM, GM->iTop);
+ CHECK_MALLOCED_CHUNK(GM, mem, nb);
+ return mem;
+ }
+
+ return SysAlloc(GM, nb);
+}
+
+
+void RHybridHeap::DlFree(void* mem)
+{
+ /*
+ Consolidate freed chunks with preceding or succeeding bordering
+ free chunks, if they exist, and then place in a bin. Intermixed
+ with special cases for iTop, iDv, mmapped chunks, and usage errors.
+*/
+ mchunkptr p = MEM2CHUNK(mem);
+ CHECK_INUSE_CHUNK(GM, p);
+ if (RTCHECK(OK_ADDRESS(GM, p) && OK_CINUSE(p)))
+ {
+ size_t psize = CHUNKSIZE(p);
+ mchunkptr next = CHUNK_PLUS_OFFSET(p, psize);
+ if (!PINUSE(p))
+ {
+ size_t prevsize = p->iPrevFoot;
+ mchunkptr prev = CHUNK_MINUS_OFFSET(p, prevsize);
+ psize += prevsize;
+ p = prev;
+ if (RTCHECK(OK_ADDRESS(GM, prev)))
+ { /* consolidate backward */
+ if (p != GM->iDv)
+ {
+ UnlinkChunk(GM, p, prevsize);
+ }
+ else if ((next->iHead & INUSE_BITS) == INUSE_BITS)
+ {
+ GM->iDvSize = psize;
+ SET_FREE_WITH_PINUSE(p, psize, next);
+ return;
+ }
+ }
+ else
+ {
+ USAGE_ERROR_ACTION(GM, p);
+ return;
+ }
+ }
+
+ if (RTCHECK(OK_NEXT(p, next) && OK_PINUSE(next)))
+ {
+ if (!CINUSE(next))
+ { /* consolidate forward */
+ if (next == GM->iTop)
+ {
+ size_t tsize = GM->iTopSize += psize;
+ GM->iTop = p;
+ p->iHead = tsize | PINUSE_BIT;
+ if (p == GM->iDv)
+ {
+ GM->iDv = 0;
+ GM->iDvSize = 0;
+ }
+ if ( !IS_FIXED_HEAP && SHOULD_TRIM(GM, tsize) )
+ SysTrim(GM, 0);
+ return;
+ }
+ else if (next == GM->iDv)
+ {
+ size_t dsize = GM->iDvSize += psize;
+ GM->iDv = p;
+ SET_SIZE_AND_PINUSE_OF_FREE_CHUNK(p, dsize);
+ return;
+ }
+ else
+ {
+ size_t nsize = CHUNKSIZE(next);
+ psize += nsize;
+ UnlinkChunk(GM, next, nsize);
+ SET_SIZE_AND_PINUSE_OF_FREE_CHUNK(p, psize);
+ if (p == GM->iDv)
+ {
+ GM->iDvSize = psize;
+ return;
+ }
+ }
+ }
+ else
+ SET_FREE_WITH_PINUSE(p, psize, next);
+ InsertChunk(GM, p, psize);
+ CHECK_FREE_CHUNK(GM, p);
+ return;
+ }
+ }
+}
+
+
+void* RHybridHeap::DlRealloc(void* oldmem, size_t bytes, TInt mode)
+{
+ mchunkptr oldp = MEM2CHUNK(oldmem);
+ size_t oldsize = CHUNKSIZE(oldp);
+ mchunkptr next = CHUNK_PLUS_OFFSET(oldp, oldsize);
+ mchunkptr newp = 0;
+ void* extra = 0;
+
+ /* Try to either shrink or extend into iTop. Else malloc-copy-free */
+
+ if (RTCHECK(OK_ADDRESS(GM, oldp) && OK_CINUSE(oldp) &&
+ OK_NEXT(oldp, next) && OK_PINUSE(next)))
+ {
+ size_t nb = REQUEST2SIZE(bytes);
+ if (oldsize >= nb) { /* already big enough */
+ size_t rsize = oldsize - nb;
+ newp = oldp;
+ if (rsize >= MIN_CHUNK_SIZE)
+ {
+ mchunkptr remainder = CHUNK_PLUS_OFFSET(newp, nb);
+ SET_INUSE(GM, newp, nb);
+// SET_INUSE(GM, remainder, rsize);
+ SET_INUSE_AND_PINUSE(GM, remainder, rsize); // corrected in original DLA version V2.8.4
+ extra = CHUNK2MEM(remainder);
+ }
+ }
+ else if (next == GM->iTop && oldsize + GM->iTopSize > nb)
+ {
+ /* Expand into iTop */
+ size_t newsize = oldsize + GM->iTopSize;
+ size_t newtopsize = newsize - nb;
+ mchunkptr newtop = CHUNK_PLUS_OFFSET(oldp, nb);
+ SET_INUSE(GM, oldp, nb);
+ newtop->iHead = newtopsize |PINUSE_BIT;
+ GM->iTop = newtop;
+ GM->iTopSize = newtopsize;
+ newp = oldp;
+ }
+ }
+ else
+ {
+ USAGE_ERROR_ACTION(GM, oldmem);
+ }
+
+ if (newp != 0)
+ {
+ if (extra != 0)
+ {
+ DlFree(extra);
+ }
+ CHECK_INUSE_CHUNK(GM, newp);
+ return CHUNK2MEM(newp);
+ }
+ else
+ {
+ if ( mode & ENeverMove )
+ return 0; // cannot move
+ void* newmem = DlMalloc(bytes);
+ if (newmem != 0)
+ {
+ size_t oc = oldsize - OVERHEAD_FOR(oldp);
+ memcpy(newmem, oldmem, (oc < bytes)? oc : bytes);
+ DlFree(oldmem);
+ }
+ return newmem;
+ }
+ // return 0;
+}
+
+size_t RHybridHeap::DlInfo(struct HeapInfo* i, SWalkInfo* wi) const
+{
+ TInt max = ((GM->iTopSize-1) & ~CHUNK_ALIGN_MASK) - CHUNK_OVERHEAD;
+ if ( max < 0 )
+ max = 0;
+ else ++i->iFreeN; // iTop always free
+ i->iFreeBytes += max;
+
+ Walk(wi, GM->iTop, max, EGoodFreeCell, EDougLeaAllocator); // Introduce DL iTop buffer to the walk function
+
+ for (mchunkptr q = ALIGN_AS_CHUNK(GM->iSeg.iBase); q != GM->iTop; q = NEXT_CHUNK(q))
+ {
+ TInt sz = CHUNKSIZE(q);
+ if (!CINUSE(q))
+ {
+ if ( sz > max )
+ max = sz;
+ i->iFreeBytes += sz;
+ ++i->iFreeN;
+ Walk(wi, CHUNK2MEM(q), sz, EGoodFreeCell, EDougLeaAllocator); // Introduce DL free buffer to the walk function
+ }
+ else
+ {
+ i->iAllocBytes += sz - CHUNK_OVERHEAD;
+ ++i->iAllocN;
+ Walk(wi, CHUNK2MEM(q), (sz- CHUNK_OVERHEAD), EGoodAllocatedCell, EDougLeaAllocator); // Introduce DL allocated buffer to the walk function
+ }
+ }
+ return max; // return largest available chunk size
+}
+
+//
+// get statistics about the state of the allocator
+//
+TInt RHybridHeap::GetInfo(struct HeapInfo* i, SWalkInfo* wi) const
+{
+ memset(i,0,sizeof(HeapInfo));
+ i->iFootprint = iChunkSize;
+ i->iMaxSize = iMaxLength;
+#ifndef __KERNEL_MODE__
+ PagedInfo(i, wi);
+ SlabInfo(i, wi);
+#endif
+ return DlInfo(i,wi);
+}
+
+//
+// Methods to commit/decommit memory pages from chunk
+//
+
+
+void* RHybridHeap::Map(void* p, TInt sz)
+//
+// allocate pages in the chunk
+// if p is NULL, find and allocate the required number of pages (which must lie in the lower half)
+// otherwise commit the pages specified
+//
+{
+ HEAP_ASSERT(sz > 0);
+
+ if ( iChunkSize + sz > iMaxLength)
+ return 0;
+
+#ifdef __KERNEL_MODE__
+
+ TInt r = ((DChunk*)iChunkHandle)->Adjust(iChunkSize + iOffset + sz);
+ if (r < 0)
+ return 0;
+
+ iChunkSize += sz;
+
+#else
+
+ RChunk chunk;
+ chunk.SetHandle(iChunkHandle);
+ if ( p )
+ {
+ TInt r;
+ if ( iUseAdjust )
+ r = chunk.Adjust(iChunkSize + sz);
+ else
+ {
+ HEAP_ASSERT(sz == Ceiling(sz, iPageSize));
+ HEAP_ASSERT(p == Floor(p, iPageSize));
+ r = chunk.Commit(iOffset + PtrDiff(p, this),sz);
+ }
+ if (r < 0)
+ return 0;
+ }
+ else
+ {
+ TInt r = chunk.Allocate(sz);
+ if (r < 0)
+ return 0;
+ if (r > iOffset)
+ {
+ // can't allow page allocations in DL zone
+ chunk.Decommit(r, sz);
+ return 0;
+ }
+ p = Offset(this, r - iOffset);
+ }
+ iChunkSize += sz;
+
+ if (iChunkSize >= iSlabInitThreshold)
+ { // set up slab system now that heap is large enough
+ SlabConfig(iSlabConfigBits);
+ iSlabInitThreshold = KMaxTInt32;
+ }
+
+#endif // __KERNEL_MODE__
+
+#ifdef ENABLE_BTRACE
+ if(iChunkSize > iHighWaterMark)
+ {
+ iHighWaterMark = Ceiling(iChunkSize,16*iPageSize);
+ TUint32 traceData[6];
+ traceData[0] = iChunkHandle;
+ traceData[1] = iMinLength;
+ traceData[2] = iMaxLength;
+ traceData[3] = sz;
+ traceData[4] = iChunkSize;
+ traceData[5] = iHighWaterMark;
+ BTraceContextN(BTrace::ETest1, 90, (TUint32)this, 33, traceData, sizeof(traceData));
+ }
+#endif
+
+ return p;
+}
+
+void RHybridHeap::Unmap(void* p, TInt sz)
+{
+ HEAP_ASSERT(sz > 0);
+
+#ifdef __KERNEL_MODE__
+
+ (void)p;
+ HEAP_ASSERT(sz == Ceiling(sz, iPageSize));
+#if defined(_DEBUG)
+ TInt r =
+#endif
+ ((DChunk*)iChunkHandle)->Adjust(iChunkSize + iOffset - sz);
+ HEAP_ASSERT(r >= 0);
+
+#else
+
+ RChunk chunk;
+ chunk.SetHandle(iChunkHandle);
+ if ( iUseAdjust )
+ {
+ HEAP_ASSERT(sz == Ceiling(sz, iPageSize));
+#if defined(_DEBUG)
+ TInt r =
+#endif
+ chunk.Adjust(iChunkSize - sz);
+ HEAP_ASSERT(r >= 0);
+ }
+ else
+ {
+ HEAP_ASSERT(sz == Ceiling(sz, iPageSize));
+ HEAP_ASSERT(p == Floor(p, iPageSize));
+#if defined(_DEBUG)
+ TInt r =
+#endif
+ chunk.Decommit(PtrDiff(p, Offset(this,-iOffset)), sz);
+ HEAP_ASSERT(r >= 0);
+ }
+#endif // __KERNEL_MODE__
+
+ iChunkSize -= sz;
+}
+
+
+#ifndef __KERNEL_MODE__
+//
+// Slab allocator code
+//
+
+//inline slab* slab::SlabFor(void* p)
+slab* slab::SlabFor( const void* p)
+{
+ return (slab*)(Floor(p, SLABSIZE));
+}
+
+//
+// Remove slab s from its tree/heap (not necessarily the root), preserving the address order
+// invariant of the heap
+//
+void RHybridHeap::TreeRemove(slab* s)
+{
+ slab** r = s->iParent;
+ slab* c1 = s->iChild1;
+ slab* c2 = s->iChild2;
+ for (;;)
+ {
+ if (!c2)
+ {
+ *r = c1;
+ if (c1)
+ c1->iParent = r;
+ return;
+ }
+ if (!c1)
+ {
+ *r = c2;
+ c2->iParent = r;
+ return;
+ }
+ if (c1 > c2)
+ {
+ slab* c3 = c1;
+ c1 = c2;
+ c2 = c3;
+ }
+ slab* newc2 = c1->iChild2;
+ *r = c1;
+ c1->iParent = r;
+ c1->iChild2 = c2;
+ c2->iParent = &c1->iChild2;
+ s = c1;
+ c1 = s->iChild1;
+ c2 = newc2;
+ r = &s->iChild1;
+ }
+}
+//
+// Insert slab s into the tree/heap rooted at r, preserving the address ordering
+// invariant of the heap
+//
+void RHybridHeap::TreeInsert(slab* s,slab** r)
+{
+ slab* n = *r;
+ for (;;)
+ {
+ if (!n)
+ { // tree empty
+ *r = s;
+ s->iParent = r;
+ s->iChild1 = s->iChild2 = 0;
+ break;
+ }
+ if (s < n)
+ { // insert between iParent and n
+ *r = s;
+ s->iParent = r;
+ s->iChild1 = n;
+ s->iChild2 = 0;
+ n->iParent = &s->iChild1;
+ break;
+ }
+ slab* c1 = n->iChild1;
+ slab* c2 = n->iChild2;
+ if ((c1 - 1) > (c2 - 1))
+ {
+ r = &n->iChild1;
+ n = c1;
+ }
+ else
+ {
+ r = &n->iChild2;
+ n = c2;
+ }
+ }
+}
+
+void* RHybridHeap::AllocNewSlab(slabset& allocator)
+//
+// Acquire and initialise a new slab, returning a cell from the slab
+// The strategy is:
+// 1. Use the lowest address free slab, if available. This is done by using the lowest slab
+// in the page at the root of the iPartialPage heap (which is address ordered). If the page
+// is now fully used, remove it from the iPartialPage heap.
+// 2. Allocate a new page for iSlabs if no empty iSlabs are available
+//
+{
+ page* p = page::PageFor(iPartialPage);
+ if (!p)
+ return AllocNewPage(allocator);
+
+ unsigned h = p->iSlabs[0].iHeader;
+ unsigned pagemap = SlabHeaderPagemap(h);
+ HEAP_ASSERT(&p->iSlabs[HIBIT(pagemap)] == iPartialPage);
+
+ unsigned slabix = LOWBIT(pagemap);
+ p->iSlabs[0].iHeader = h &~ (0x100<<slabix);
+ if (!(pagemap &~ (1<<slabix)))
+ {
+ TreeRemove(iPartialPage); // last free slab in page
+ }
+
+ return InitNewSlab(allocator, &p->iSlabs[slabix]);
+}
+
+/** Definition of this function is not present in the prototype code ***/
+#if 0
+void RHybridHeap::partial_insert(slab* s)
+{
+ // slab has had first cell freed and needs to be linked back into iPartial tree
+ slabset& ss = iSlabAlloc[iSizeMap[s->clz]];
+
+ HEAP_ASSERT(s->used == slabfull);
+ s->used = ss.fulluse - s->clz; // full-1 loading
+ TreeInsert(s,&ss.iPartial);
+ CHECKTREE(&ss.iPartial);
+}
+/** Definition of this function is not present in the prototype code ***/
+#endif
+
+void* RHybridHeap::AllocNewPage(slabset& allocator)
+//
+// Acquire and initialise a new page, returning a cell from a new slab
+// The iPartialPage tree is empty (otherwise we'd have used a slab from there)
+// The iPartialPage link is put in the highest addressed slab in the page, and the
+// lowest addressed slab is used to fulfill the allocation request
+//
+{
+ page* p = iSparePage;
+ if (p)
+ iSparePage = 0;
+ else
+ {
+ p = static_cast<page*>(Map(0, iPageSize));
+ if (!p)
+ return 0;
+ }
+ HEAP_ASSERT(p == Floor(p, iPageSize));
+ // Store page allocated for slab into paged_bitmap (for RHybridHeap::Reset())
+ if (!PagedSetSize(p, iPageSize))
+ {
+ Unmap(p, iPageSize);
+ return 0;
+ }
+ p->iSlabs[0].iHeader = ((1<<3) + (1<<2) + (1<<1))<<8; // set pagemap
+ p->iSlabs[3].iParent = &iPartialPage;
+ p->iSlabs[3].iChild1 = p->iSlabs[3].iChild2 = 0;
+ iPartialPage = &p->iSlabs[3];
+ return InitNewSlab(allocator,&p->iSlabs[0]);
+}
+
+void RHybridHeap::FreePage(page* p)
+//
+// Release an unused page to the OS
+// A single page is cached for reuse to reduce thrashing
+// the OS allocator.
+//
+{
+ HEAP_ASSERT(Ceiling(p, iPageSize) == p);
+ if (!iSparePage)
+ {
+ iSparePage = p;
+ return;
+ }
+
+ // unmapped slab page must be cleared from paged_bitmap, too
+ PagedZapSize(p, iPageSize); // clear page map
+
+ Unmap(p, iPageSize);
+}
+
+void RHybridHeap::FreeSlab(slab* s)
+//
+// Release an empty slab to the slab manager
+// The strategy is:
+// 1. The page containing the slab is checked to see the state of the other iSlabs in the page by
+// inspecting the pagemap field in the iHeader of the first slab in the page.
+// 2. The pagemap is updated to indicate the new unused slab
+// 3. If this is the only unused slab in the page then the slab iHeader is used to add the page to
+// the iPartialPage tree/heap
+// 4. If all the iSlabs in the page are now unused the page is released back to the OS
+// 5. If this slab has a higher address than the one currently used to track this page in
+// the iPartialPage heap, the linkage is moved to the new unused slab
+//
+{
+ TreeRemove(s);
+ CHECKTREE(s->iParent);
+ HEAP_ASSERT(SlabHeaderUsedm4(s->iHeader) == SlabHeaderSize(s->iHeader)-4);
+
+ page* p = page::PageFor(s);
+ unsigned h = p->iSlabs[0].iHeader;
+ int slabix = s - &p->iSlabs[0];
+ unsigned pagemap = SlabHeaderPagemap(h);
+ p->iSlabs[0].iHeader = h | (0x100<<slabix);
+ if (pagemap == 0)
+ { // page was full before, use this slab as link in empty heap
+ TreeInsert(s, &iPartialPage);
+ }
+ else
+ { // Find the current empty-link slab
+ slab* sl = &p->iSlabs[HIBIT(pagemap)];
+ pagemap ^= (1<<slabix);
+ if (pagemap == 0xf)
+ { // page is now empty so recycle page to os
+ TreeRemove(sl);
+ FreePage(p);
+ return;
+ }
+ // ensure the free list link is in highest address slab in page
+ if (s > sl)
+ { // replace current link with new one. Address-order tree so position stays the same
+ slab** r = sl->iParent;
+ slab* c1 = sl->iChild1;
+ slab* c2 = sl->iChild2;
+ s->iParent = r;
+ s->iChild1 = c1;
+ s->iChild2 = c2;
+ *r = s;
+ if (c1)
+ c1->iParent = &s->iChild1;
+ if (c2)
+ c2->iParent = &s->iChild2;
+ }
+ CHECK(if (s < sl) s=sl);
+ }
+ HEAP_ASSERT(SlabHeaderPagemap(p->iSlabs[0].iHeader) != 0);
+ HEAP_ASSERT(HIBIT(SlabHeaderPagemap(p->iSlabs[0].iHeader)) == unsigned(s - &p->iSlabs[0]));
+}
+
+
+void RHybridHeap::SlabInit()
+{
+ iSlabThreshold=0;
+ iPartialPage = 0;
+ iFullSlab = 0;
+ iSparePage = 0;
+ memset(&iSizeMap[0],0xff,sizeof(iSizeMap));
+ memset(&iSlabAlloc[0],0,sizeof(iSlabAlloc));
+}
+
+void RHybridHeap::SlabConfig(unsigned slabbitmap)
+{
+ HEAP_ASSERT((slabbitmap & ~EOkBits) == 0);
+ HEAP_ASSERT(MAXSLABSIZE <= 60);
+
+ unsigned int ix = 0xff;
+ unsigned int bit = 1<<((MAXSLABSIZE>>2)-1);
+ for (int sz = MAXSLABSIZE; sz >= 0; sz -= 4, bit >>= 1)
+ {
+ if (slabbitmap & bit)
+ {
+ if (ix == 0xff)
+ iSlabThreshold=sz+1;
+ ix = (sz>>2)-1;
+ }
+ iSizeMap[sz>>2] = (TUint8) ix;
+ }
+}
+
+
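+//
+// Slab iHeader bit layout, as inferred from the masks used in SlabAllocate(), SlabFree() and
+// InitNewSlab() below (a reading aid rather than a definitive specification):
+//   bits  0-7  : word offset (byte offset / 4) of the first cell on the slab's free list, 0 if empty
+//   bits  8-11 : pagemap of unused slabs in the page (meaningful in the first slab of a page only)
+//   bits 12-17 : cell size in bytes (a multiple of 4, up to MAXSLABSIZE)
+//   bits 18-27 : "usedm4", bytes allocated in the slab minus 4
+//   bit  31    : FLOATING_BIT, set while a full slab is parked on the iFullSlab list
+//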
+void* RHybridHeap::SlabAllocate(slabset& ss)
+//
+// Allocate a cell from the given slabset
+// Strategy:
+// 1. Take the partially full slab at the iTop of the heap (lowest address).
+// 2. If there is no such slab, allocate from a new slab
+// 3. If the slab has a non-empty freelist, pop the cell from the front of the list and update the slab
+// 4. Otherwise, if the slab is not full, return the cell at the end of the currently used region of
+// the slab, updating the slab
+// 5. Otherwise, release the slab from the iPartial tree/heap, marking it as 'floating' and go back to
+// step 1
+//
+{
+ for (;;)
+ {
+ slab *s = ss.iPartial;
+ if (!s)
+ break;
+ unsigned h = s->iHeader;
+ unsigned free = h & 0xff; // extract free cell positioning
+ if (free)
+ {
+ HEAP_ASSERT(((free<<2)-sizeof(slabhdr))%SlabHeaderSize(h) == 0);
+ void* p = Offset(s,free<<2);
+ free = *(unsigned char*)p; // get next pos in free list
+ h += (h&0x3C000)<<6; // update usedm4
+ h &= ~0xff;
+ h |= free; // update freelist
+ s->iHeader = h;
+ HEAP_ASSERT(SlabHeaderFree(h) == 0 || ((SlabHeaderFree(h)<<2)-sizeof(slabhdr))%SlabHeaderSize(h) == 0);
+ HEAP_ASSERT(SlabHeaderUsedm4(h) <= 0x3F8u);
+ HEAP_ASSERT((SlabHeaderUsedm4(h)+4)%SlabHeaderSize(h) == 0);
+ return p;
+ }
+ unsigned h2 = h + ((h&0x3C000)<<6);
+// if (h2 < 0xfc00000)
+ if (h2 < MAXUSEDM4BITS)
+ {
+ HEAP_ASSERT((SlabHeaderUsedm4(h2)+4)%SlabHeaderSize(h2) == 0);
+ s->iHeader = h2;
+ return Offset(s,(h>>18) + sizeof(unsigned) + sizeof(slabhdr));
+ }
+ h |= FLOATING_BIT; // mark the slab as full-floating
+ s->iHeader = h;
+ TreeRemove(s);
+ slab* c = iFullSlab; // add to full list
+ iFullSlab = s;
+ s->iParent = &iFullSlab;
+ s->iChild1 = c;
+ s->iChild2 = 0;
+ if (c)
+ c->iParent = &s->iChild1;
+
+ CHECKTREE(&ss.iPartial);
+ // go back and try the next slab...
+ }
+ // no iPartial iSlabs found, so allocate from a new slab
+ return AllocNewSlab(ss);
+}
+
+void RHybridHeap::SlabFree(void* p)
+//
+// Free a cell from the slab allocator
+// Strategy:
+// 1. Find the containing slab (round down to nearest 1KB boundary)
+// 2. Push the cell into the slab's freelist, and update the slab usage count
+// 3. If this is the last allocated cell, free the slab to the main slab manager
+// 4. If the slab was full-floating then insert the slab in its respective iPartial tree
+//
+{
+ HEAP_ASSERT(LowBits(p,3)==0);
+ slab* s = slab::SlabFor(p);
+ CHECKSLAB(s,ESlabAllocator,p);
+ CHECKSLABBFR(s,p);
+
+ unsigned pos = LowBits(p, SLABSIZE);
+ unsigned h = s->iHeader;
+ HEAP_ASSERT(SlabHeaderUsedm4(h) != 0x3fC); // slab is empty already
+ HEAP_ASSERT((pos-sizeof(slabhdr))%SlabHeaderSize(h) == 0);
+ *(unsigned char*)p = (unsigned char)h;
+ h &= ~0xFF;
+ h |= (pos>>2);
+ unsigned size = h & 0x3C000;
+ if (int(h) >= 0)
+ {
+ h -= size<<6;
+ if (int(h)>=0)
+ {
+ s->iHeader = h;
+ return;
+ }
+ FreeSlab(s);
+ return;
+ }
+ h -= size<<6;
+ h &= ~FLOATING_BIT;
+ s->iHeader = h;
+ slab** full = s->iParent; // remove from full list
+ slab* c = s->iChild1;
+ *full = c;
+ if (c)
+ c->iParent = full;
+
+ slabset& ss = iSlabAlloc[iSizeMap[size>>14]];
+ TreeInsert(s,&ss.iPartial);
+ CHECKTREE(&ss.iPartial);
+}
+
+void* RHybridHeap::InitNewSlab(slabset& allocator, slab* s)
+//
+// initialise an empty slab for this allocator and return the first cell
+// pre-condition: the slabset has no iPartial iSlabs for allocation
+//
+{
+ HEAP_ASSERT(allocator.iPartial==0);
+ TInt size = 4 + ((&allocator-&iSlabAlloc[0])<<2); // infer size from slab allocator address
+ unsigned h = s->iHeader & 0xF00; // preserve pagemap only
+ h |= (size<<12); // set size
+ h |= (size-4)<<18; // set usedminus4 to one object minus 4
+ s->iHeader = h;
+ allocator.iPartial = s;
+ s->iParent = &allocator.iPartial;
+ s->iChild1 = s->iChild2 = 0;
+ return Offset(s,sizeof(slabhdr));
+}
+
+const unsigned char slab_bitcount[16] = {0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4};
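+// slab_bitcount[x] is the population count of the 4-bit value x; SlabEmptyInfo() uses it to
+// turn a page's free-slab pagemap into a count of free 1KB (SLABSIZE) slabs.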
+
+const unsigned char slab_ext_frag[16] =
+{
+ 0,
+ 16 + (1008 % 4),
+ 16 + (1008 % 8),
+ 16 + (1008 % 12),
+ 16 + (1008 % 16),
+ 16 + (1008 % 20),
+ 16 + (1008 % 24),
+ 16 + (1008 % 28),
+ 16 + (1008 % 32),
+ 16 + (1008 % 36),
+ 16 + (1008 % 40),
+ 16 + (1008 % 44),
+ 16 + (1008 % 48),
+ 16 + (1008 % 52),
+ 16 + (1008 % 56),
+ 16 + (1008 % 60)
+};
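+// slab_ext_frag[sz>>2] is the number of bytes of a 1KB slab that can never hold a cell of
+// size sz: the 16-byte slab header (sizeof(slabhdr)) plus the 1008 % sz remainder left after
+// packing whole cells, as used by SlabPartialInfo()/SlabFullInfo() below.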
+
+void RHybridHeap::TreeWalk(slab* const* root, void (*f)(slab*, struct HeapInfo*, SWalkInfo*), struct HeapInfo* i, SWalkInfo* wi)
+{
+ // iterative walk around the tree at root
+
+ slab* s = *root;
+ if (!s)
+ return;
+
+ for (;;)
+ {
+ slab* c;
+ while ((c = s->iChild1) != 0)
+ s = c; // walk down left side to end
+ for (;;)
+ {
+ f(s, i, wi);
+ c = s->iChild2;
+ if (c)
+ { // one step down right side, now try and walk down left
+ s = c;
+ break;
+ }
+ for (;;)
+ { // loop to walk up right side
+ slab** pp = s->iParent;
+ if (pp == root)
+ return;
+ s = slab::SlabFor(pp);
+ if (pp == &s->iChild1)
+ break;
+ }
+ }
+ }
+}
+
+void RHybridHeap::SlabEmptyInfo(slab* s, struct HeapInfo* i, SWalkInfo* wi)
+{
+ Walk(wi, s, SLABSIZE, EGoodFreeCell, EEmptySlab); // Introduce an empty slab to the walk function
+ int nslab = slab_bitcount[SlabHeaderPagemap(page::PageFor(s)->iSlabs[0].iHeader)];
+ i->iFreeN += nslab;
+ i->iFreeBytes += nslab << SLABSHIFT;
+}
+
+void RHybridHeap::SlabPartialInfo(slab* s, struct HeapInfo* i, SWalkInfo* wi)
+{
+ Walk(wi, s, SLABSIZE, EGoodAllocatedCell, EPartialFullSlab); // Introduce a full slab to the walk function
+ unsigned h = s->iHeader;
+ unsigned used = SlabHeaderUsedm4(h)+4;
+ unsigned size = SlabHeaderSize(h);
+ unsigned free = 1024 - slab_ext_frag[size>>2] - used;
+ i->iFreeN += (free/size);
+ i->iFreeBytes += free;
+ i->iAllocN += (used/size);
+ i->iAllocBytes += used;
+}
+
+void RHybridHeap::SlabFullInfo(slab* s, struct HeapInfo* i, SWalkInfo* wi)
+{
+ Walk(wi, s, SLABSIZE, EGoodAllocatedCell, EFullSlab); // Introduce a full slab to the walk function
+ unsigned h = s->iHeader;
+ unsigned used = SlabHeaderUsedm4(h)+4;
+ unsigned size = SlabHeaderSize(h);
+ HEAP_ASSERT(1024 - slab_ext_frag[size>>2] - used == 0);
+ i->iAllocN += (used/size);
+ i->iAllocBytes += used;
+}
+
+void RHybridHeap::SlabInfo(struct HeapInfo* i, SWalkInfo* wi) const
+{
+ if (iSparePage)
+ {
+ i->iFreeBytes += iPageSize;
+ i->iFreeN = 4;
+ Walk(wi, iSparePage, iPageSize, EGoodFreeCell, ESlabSpare); // Introduce Slab spare page to the walk function
+ }
+ TreeWalk(&iFullSlab, &SlabFullInfo, i, wi);
+ for (int ix = 0; ix < (MAXSLABSIZE>>2); ++ix)
+ TreeWalk(&iSlabAlloc[ix].iPartial, &SlabPartialInfo, i, wi);
+ TreeWalk(&iPartialPage, &SlabEmptyInfo, i, wi);
+}
+
+
+//
+// Bitmap class implementation for large page allocator
+//
+inline unsigned char* paged_bitmap::Addr() const {return iBase;}
+inline unsigned paged_bitmap::Size() const {return iNbits;}
+//
+
+void paged_bitmap::Init(unsigned char* p, unsigned size, unsigned bit)
+{
+ iBase = p;
+ iNbits=size;
+ int bytes=Ceiling(size,8)>>3;
+ memset(p,bit?0xff:0,bytes);
+}
+
+inline void paged_bitmap::Set(unsigned ix, unsigned bit)
+{
+ if (bit)
+ iBase[ix>>3] |= (1<<(ix&7));
+ else
+ iBase[ix>>3] &= ~(1<<(ix&7));
+}
+
+inline unsigned paged_bitmap::operator[](unsigned ix) const
+{
+ return 1U&(iBase[ix>>3] >> (ix&7));
+}
+
+void paged_bitmap::Setn(unsigned ix, unsigned len, unsigned bit)
+{
+ int l=len;
+ while (--l>=0)
+ Set(ix++,bit);
+}
+
+void paged_bitmap::Set(unsigned ix, unsigned len, unsigned val)
+{
+ int l=len;
+ while (--l>=0)
+ {
+ Set(ix++,val&1);
+ val>>=1;
+ }
+}
+
+unsigned paged_bitmap::Bits(unsigned ix, unsigned len) const
+{
+ int l=len;
+ unsigned val=0;
+ unsigned bit=0;
+ while (--l>=0)
+ val |= (*this)[ix++]<<bit++;
+ return val;
+}
+
+bool paged_bitmap::Is(unsigned ix, unsigned len, unsigned bit) const
+{
+ unsigned i2 = ix+len;
+ if (i2 > iNbits)
+ return false;
+ for (;;)
+ {
+ if ((*this)[ix] != bit)
+ return false;
+ if (++ix==i2)
+ return true;
+ }
+}
+
+int paged_bitmap::Find(unsigned start, unsigned bit) const
+{
+ if (start<iNbits) do
+ {
+ if ((*this)[start]==bit)
+ return start;
+ } while (++start<iNbits);
+ return -1;
+}
+
+
+//
+// Page allocator code
+//
+void RHybridHeap::PagedInit(TInt aPagePower)
+{
+ if (aPagePower > 0)
+ {
+ if (aPagePower < MINPAGEPOWER)
+ aPagePower = MINPAGEPOWER;
+ }
+ else aPagePower = 31;
+
+ iPageThreshold = aPagePower;
+ /*-------------------------------------------------------------
+ * Initialize page bitmap
+ *-------------------------------------------------------------*/
+ iPageMap.Init((unsigned char*)&iBitMapBuffer, MAXSMALLPAGEBITS, 0);
+}
+
+void* RHybridHeap::PagedAllocate(unsigned size)
+{
+ TInt nbytes = Ceiling(size, iPageSize);
+ void* p = Map(0, nbytes);
+ if (!p)
+ return 0;
+ if (!PagedSetSize(p, nbytes))
+ {
+ Unmap(p, nbytes);
+ return 0;
+ }
+ return p;
+}
+
+void* RHybridHeap::PagedReallocate(void* p, unsigned size, TInt mode)
+{
+
+ HEAP_ASSERT(Ceiling(p, iPageSize) == p);
+ unsigned nbytes = Ceiling(size, iPageSize);
+
+ unsigned osize = PagedSize(p);
+ if ( nbytes == 0 ) // Special case to handle shrinking below min page threshold
+ nbytes = Min((1 << MINPAGEPOWER), osize);
+
+ if (osize == nbytes)
+ return p;
+
+ if (nbytes < osize)
+ { // shrink in place, unmap final pages and rewrite the pagemap
+ Unmap(Offset(p, nbytes), osize-nbytes);
+      // zap the old size code and then write the new one (cannot fail: the new code is no longer than the old)
+ PagedZapSize(p, osize);
+
+ TBool check = PagedSetSize(p, nbytes);
+ __ASSERT_ALWAYS(check, HEAP_PANIC(ETHeapBadCellAddress));
+
+ return p;
+ }
+
+ // nbytes > osize
+ // try and extend current region first
+
+ void* newp = Map(Offset(p, osize), nbytes-osize);
+ if (newp)
+      { // In-place growth; note the pagemap itself may also have to grow, and that can still fail
+ if (!PagedSetSize(p, nbytes))
+ { // must release extra mapping
+ Unmap(Offset(p, osize), nbytes-osize);
+ return 0;
+ }
+ // if successful, the new length code will have overwritten the old one (it is at least as long)
+ return p;
+ }
+
+ // fallback to allocate/copy/free
+ if (mode & ENeverMove)
+ return 0; // not allowed to move cell
+
+ newp = PagedAllocate(nbytes);
+ if (!newp)
+ return 0;
+ memcpy(newp, p, osize);
+ PagedFree(p);
+ return newp;
+}
+
+void RHybridHeap::PagedFree(void* p)
+{
+ HEAP_ASSERT(Ceiling(p, iPageSize) == p);
+
+
+ unsigned size = PagedSize(p);
+
+ PagedZapSize(p, size); // clear page map
+ Unmap(p, size);
+}
+
+void RHybridHeap::PagedInfo(struct HeapInfo* i, SWalkInfo* wi) const
+{
+ for (int ix = 0;(ix = iPageMap.Find(ix,1)) >= 0;)
+ {
+ int npage = PagedDecode(ix);
+ // Introduce paged buffer to the walk function
+ TAny* bfr = Bitmap2addr(ix);
+ int len = npage << PAGESHIFT;
+ if ( len > iPageSize )
+      { // A buffer of exactly one page is a slab page mapped into the bitmap; only count larger paged buffers here
+ i->iAllocBytes += len;
+ ++i->iAllocN;
+ Walk(wi, bfr, len, EGoodAllocatedCell, EPageAllocator);
+ }
+ ix += (npage<<1);
+ }
+}
+
+void RHybridHeap::ResetBitmap()
+/*---------------------------------------------------------
+ * Go through paged_bitmap and unmap all buffers to system
+ * This method is called from RHybridHeap::Reset() to unmap all page
+ * allocated - and slab pages which are stored in bitmap, too
+ *---------------------------------------------------------*/
+{
+ unsigned iNbits = iPageMap.Size();
+ if ( iNbits )
+ {
+ for (int ix = 0;(ix = iPageMap.Find(ix,1)) >= 0;)
+ {
+ int npage = PagedDecode(ix);
+ void* p = Bitmap2addr(ix);
+ unsigned size = PagedSize(p);
+ PagedZapSize(p, size); // clear page map
+ Unmap(p, size);
+ ix += (npage<<1);
+ }
+ if ( (TInt)iNbits > MAXSMALLPAGEBITS )
+ {
+ // unmap page reserved for enlarged bitmap
+ Unmap(iPageMap.Addr(), (iNbits >> 3) );
+ }
+ }
+}
+
+TBool RHybridHeap::CheckBitmap(void* aBfr, TInt aSize, TUint32& aDummy, TInt& aNPages)
+/*---------------------------------------------------------
+ * If aBfr = NULL
+ * Go through paged_bitmap and unmap all buffers to system
+ * and assure that by reading the first word of each page of aBfr
+ * that aBfr is still accessible
+ * else
+ * Assure that specified buffer is mapped with correct length in
+ * page map
+ *---------------------------------------------------------*/
+{
+ TBool ret;
+ if ( aBfr )
+ {
+ __ASSERT_ALWAYS((Ceiling(aBfr, iPageSize) == aBfr), HEAP_PANIC(ETHeapBadCellAddress));
+ ret = ( aSize == (TInt)PagedSize(aBfr));
+ }
+ else
+ {
+ ret = ETrue;
+ unsigned iNbits = iPageMap.Size();
+ if ( iNbits )
+ {
+ TInt npage;
+ aNPages = 0;
+ for (int ix = 0;(ix = iPageMap.Find(ix,1)) >= 0;)
+ {
+ npage = PagedDecode(ix);
+ aNPages += npage;
+ void* p = Bitmap2addr(ix);
+ __ASSERT_ALWAYS((Ceiling(p, iPageSize) == p), HEAP_PANIC(ETHeapBadCellAddress));
+ unsigned s = PagedSize(p);
+ __ASSERT_ALWAYS((Ceiling(s, iPageSize) == s), HEAP_PANIC(ETHeapBadCellAddress));
+ while ( s )
+ {
+ aDummy += *(TUint32*)((TUint8*)p + (s-iPageSize));
+ s -= iPageSize;
+ }
+ ix += (npage<<1);
+ }
+ if ( (TInt)iNbits > MAXSMALLPAGEBITS )
+ {
+ // add enlarged bitmap page(s) to total page count
+ npage = (iNbits >> 3);
+ __ASSERT_ALWAYS((Ceiling(npage, iPageSize) == npage), HEAP_PANIC(ETHeapBadCellAddress));
+ aNPages += (npage / iPageSize);
+ }
+ }
+ }
+
+ return ret;
+}
+
+
+// The paged allocations are tracked in a bitmap which has 2 bits per page
+// this allows us to store allocations as small as 4KB
+// The presence and size of an allocation is encoded as follows:
+// let N = number of pages in the allocation, then
+// 10 : N = 1 // 4KB
+// 110n : N = 2 + n // 8-12KB
+// 1110nnnn : N = nnnn // 16-60KB
+// 1111n[18] : N = n[18] // 64KB-1GB
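+//
+// Worked example (illustrative): a 3-page allocation (12KB with 4KB pages) starting at page
+// index P is encoded by PagedEncode() as the bits 1,1,0,1 written LSB-first from bit position
+// 2*P of the bitmap, i.e. "110n" with n = 1, so PagedDecode() returns N = 2 + 1 = 3 pages.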
+
+const struct etab { unsigned char offset, len, codelen, code;} encode_table[] =
+{
+ {1,2,2,0x1},
+ {2,4,3,0x3},
+ {0,8,4,0x7},
+ {0,22,4,0xf}
+};
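+// Each etab row drives PagedEncode(): 'code' holds the prefix bits (written LSB-first),
+// 'codelen' is the number of prefix bits, 'len' is the total length of the record in bits,
+// and 'offset' is subtracted from the page count before it is stored in the remaining
+// (len - codelen) payload bits.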
+
+// Return code length for specified allocation Size(assumed to be aligned to pages)
+inline unsigned paged_codelen(unsigned size, unsigned pagesz)
+{
+ HEAP_ASSERT(size == Ceiling(size, pagesz));
+
+ if (size == pagesz)
+ return 2;
+ else if (size < 4*pagesz)
+ return 4;
+ else if (size < 16*pagesz)
+ return 8;
+ else
+ return 22;
+}
+
+inline const etab& paged_coding(unsigned npage)
+{
+ if (npage < 4)
+ return encode_table[npage>>1];
+ else if (npage < 16)
+ return encode_table[2];
+ else
+ return encode_table[3];
+}
+
+bool RHybridHeap::PagedEncode(unsigned pos, unsigned npage)
+{
+ const etab& e = paged_coding(npage);
+ if (pos + e.len > iPageMap.Size())
+ {
+ // need to grow the page bitmap to fit the cell length into the map
+ // if we outgrow original bitmap buffer in RHybridHeap metadata, then just get enough pages to cover the full space:
+      // * initial 68 byte bitmap mapped (68*8*4kB):2 = 1.1MB
+ // * 4KB can Map(4096*8*4kB):2 = 64MB
+ unsigned maxsize = Ceiling(iMaxLength, iPageSize);
+ unsigned mapbits = maxsize >> (PAGESHIFT-1);
+ maxsize = Ceiling(mapbits>>3, iPageSize);
+ void* newb = Map(0, maxsize);
+ if (!newb)
+ return false;
+
+ unsigned char* oldb = iPageMap.Addr();
+ iPageMap.Init((unsigned char*)newb, (maxsize<<3), 0);
+ memcpy(newb, oldb, Ceiling(MAXSMALLPAGEBITS,8)>>3);
+ }
+ // encode the allocation block size into the bitmap, starting at the bit for the start page
+ unsigned bits = e.code;
+ bits |= (npage - e.offset) << e.codelen;
+ iPageMap.Set(pos, e.len, bits);
+ return true;
+}
+
+unsigned RHybridHeap::PagedDecode(unsigned pos) const
+{
+ __ASSERT_ALWAYS(pos + 2 <= iPageMap.Size(), HEAP_PANIC(ETHeapBadCellAddress));
+
+ unsigned bits = iPageMap.Bits(pos,2);
+ __ASSERT_ALWAYS(bits & 1, HEAP_PANIC(ETHeapBadCellAddress));
+ bits >>= 1;
+ if (bits == 0)
+ return 1;
+ __ASSERT_ALWAYS(pos + 4 <= iPageMap.Size(), HEAP_PANIC(ETHeapBadCellAddress));
+ bits = iPageMap.Bits(pos+2,2);
+ if ((bits & 1) == 0)
+ return 2 + (bits>>1);
+ else if ((bits>>1) == 0)
+ {
+ __ASSERT_ALWAYS(pos + 8 <= iPageMap.Size(), HEAP_PANIC(ETHeapBadCellAddress));
+ return iPageMap.Bits(pos+4, 4);
+ }
+ else
+ {
+ __ASSERT_ALWAYS(pos + 22 <= iPageMap.Size(), HEAP_PANIC(ETHeapBadCellAddress));
+ return iPageMap.Bits(pos+4, 18);
+ }
+}
+
+inline void RHybridHeap::PagedZapSize(void* p, unsigned size)
+{iPageMap.Setn(PtrDiff(p, iMemBase) >> (PAGESHIFT-1), paged_codelen(size, iPageSize) ,0);}
+
+inline unsigned RHybridHeap::PagedSize(void* p) const
+ { return PagedDecode(PtrDiff(p, iMemBase) >> (PAGESHIFT-1)) << PAGESHIFT; }
+
+inline bool RHybridHeap::PagedSetSize(void* p, unsigned size)
+{ return PagedEncode(PtrDiff(p, iMemBase) >> (PAGESHIFT-1), size >> PAGESHIFT); }
+
+inline void* RHybridHeap::Bitmap2addr(unsigned pos) const
+ { return iMemBase + (1 << (PAGESHIFT-1))*pos; }
+
+
+#ifndef QT_SYMBIAN4_ALLOCATOR_UNWANTED_CODE
+//////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////
+/**
+Constructor where minimum and maximum length of the heap can be defined.
+It defaults the chunk heap to be created to use a new local chunk,
+to have a grow by value of KMinHeapGrowBy, to be unaligned, not to be
+single threaded and not to have any mode flags set.
+
+@param aMinLength The minimum length of the heap to be created.
+@param aMaxLength The maximum length to which the heap to be created can grow.
+ If the supplied value is less than a page size, then it
+ is discarded and the page size is used instead.
+*/
+EXPORT_C TChunkHeapCreateInfo::TChunkHeapCreateInfo(TInt aMinLength, TInt aMaxLength) :
+ iVersionNumber(EVersion0), iMinLength(aMinLength), iMaxLength(aMaxLength),
+iAlign(0), iGrowBy(1), iSingleThread(EFalse),
+iOffset(0), iPaging(EUnspecified), iMode(0), iName(NULL)
+{
+}
+
+
+/**
+Sets the chunk heap to create a new chunk with the specified name.
+
+This overrides any previous call to TChunkHeapCreateInfo::SetNewChunkHeap() or
+TChunkHeapCreateInfo::SetExistingChunkHeap() for this TChunkHeapCreateInfo object.
+
+@param aName The name to be given to the chunk heap to be created
+If NULL, the function constructs a local chunk to host the heap.
+If not NULL, a pointer to a descriptor containing the name to be
+assigned to the global chunk hosting the heap.
+*/
+EXPORT_C void TChunkHeapCreateInfo::SetCreateChunk(const TDesC* aName)
+{
+ iName = (TDesC*)aName;
+ iChunk.SetHandle(KNullHandle);
+}
+
+
+/**
+Sets the chunk heap to be created to use the chunk specified.
+
+This overrides any previous call to TChunkHeapCreateInfo::SetNewChunkHeap() or
+TChunkHeapCreateInfo::SetExistingChunkHeap() for this TChunkHeapCreateInfo object.
+
+@param aChunk A handle to the chunk to use for the heap.
+*/
+EXPORT_C void TChunkHeapCreateInfo::SetUseChunk(const RChunk aChunk)
+{
+ iName = NULL;
+ iChunk = aChunk;
+}
+
+EXPORT_C RHeap* UserHeap::FixedHeap(TAny* aBase, TInt aMaxLength, TInt aAlign, TBool aSingleThread)
+/**
+Creates a fixed length heap at a specified location.
+
+On successful return from this function, the heap is ready to use. This assumes that
+the memory pointed to by aBase is mapped and able to be used. You must ensure that you
+pass in a large enough value for aMaxLength. Passing in a value that is too small to
+hold the metadata for the heap (~1 KB) will result in the size being rounded up and the
+heap thereby running over the end of the memory assigned to it. But then if you were to
+pass in such a small value then you would not be able to do any allocations from the
+heap anyway. Moral of the story: Use a sensible value for aMaxLength!
+
+@param aBase A pointer to the location where the heap is to be constructed.
+@param aMaxLength The maximum length in bytes to which the heap can grow. If the
+ supplied value is too small to hold the heap's metadata, it
+ will be increased.
+@param aAlign From Symbian^4 onwards, this value is ignored but EABI 8
+ byte alignment is guaranteed for all allocations 8 bytes or
+ more in size. 4 byte allocations will be aligned to a 4
+ byte boundary. Best to pass in zero.
+@param aSingleThread EFalse if the heap is to be accessed from multiple threads.
+ This will cause internal locks to be created, guaranteeing
+ thread safety.
+
+@return A pointer to the new heap, or NULL if the heap could not be created.
+
+@panic USER 56 if aMaxLength is negative.
+*/
+{
+ __ASSERT_ALWAYS( aMaxLength>=0, ::Panic(ETHeapMaxLengthNegative));
+ if ( aMaxLength < (TInt)sizeof(RHybridHeap) )
+ aMaxLength = sizeof(RHybridHeap);
+
+ RHybridHeap* h = new(aBase) RHybridHeap(aMaxLength, aAlign, aSingleThread);
+
+ if (!aSingleThread)
+ {
+ TInt r = h->iLock.CreateLocal();
+ if (r!=KErrNone)
+ return NULL; // No need to delete the RHybridHeap instance as the new above is only a placement new
+ h->iHandles = (TInt*)&h->iLock;
+ h->iHandleCount = 1;
+ }
+ return h;
+}
+
+/**
+Creates a chunk heap of the type specified by the parameter aCreateInfo.
+
+@param aCreateInfo A reference to a TChunkHeapCreateInfo object specifying the
+type of chunk heap to create.
+
+@return A pointer to the new heap or NULL if the heap could not be created.
+
+@panic USER 41 if the heap's specified minimum length is greater than the specified maximum length.
+@panic USER 55 if the heap's specified minimum length is negative.
+@panic USER 172 if the heap's specified alignment is not a power of 2 or is less than the size of a TAny*.
+*/
+EXPORT_C RHeap* UserHeap::ChunkHeap(const TChunkHeapCreateInfo& aCreateInfo)
+{
+   // aCreateInfo must have been configured to use a new chunk or an existing chunk.
+ __ASSERT_ALWAYS(!(aCreateInfo.iMode & (TUint32)~EChunkHeapMask), ::Panic(EHeapCreateInvalidMode));
+ RHeap* h = NULL;
+
+ if (aCreateInfo.iChunk.Handle() == KNullHandle)
+ {
+ // A new chunk is to be created for this heap.
+
+ __ASSERT_ALWAYS(aCreateInfo.iMinLength >= 0, ::Panic(ETHeapMinLengthNegative));
+ __ASSERT_ALWAYS(aCreateInfo.iMaxLength >= aCreateInfo.iMinLength, ::Panic(ETHeapCreateMaxLessThanMin));
+
+ TInt maxLength = aCreateInfo.iMaxLength;
+ TInt page_size;
+ GET_PAGE_SIZE(page_size);
+
+ if (maxLength < page_size)
+ maxLength = page_size;
+
+ TChunkCreateInfo chunkInfo;
+#if USE_HYBRID_HEAP
+ if ( aCreateInfo.iOffset )
+ chunkInfo.SetNormal(0, maxLength); // Create DL only heap
+ else
+ {
+ maxLength = 2*maxLength;
+ chunkInfo.SetDisconnected(0, 0, maxLength); // Create hybrid heap
+ }
+#else
+ chunkInfo.SetNormal(0, maxLength); // Create DL only heap
+#endif
+ chunkInfo.SetOwner((aCreateInfo.iSingleThread)? EOwnerThread : EOwnerProcess);
+ if (aCreateInfo.iName)
+ chunkInfo.SetGlobal(*aCreateInfo.iName);
+ // Set the paging attributes of the chunk.
+ if (aCreateInfo.iPaging == TChunkHeapCreateInfo::EPaged)
+ chunkInfo.SetPaging(TChunkCreateInfo::EPaged);
+ if (aCreateInfo.iPaging == TChunkHeapCreateInfo::EUnpaged)
+ chunkInfo.SetPaging(TChunkCreateInfo::EUnpaged);
+ // Create the chunk.
+ RChunk chunk;
+ if (chunk.Create(chunkInfo) != KErrNone)
+ return NULL;
+ // Create the heap using the new chunk.
+ TUint mode = aCreateInfo.iMode | EChunkHeapDuplicate; // Must duplicate the handle.
+ h = OffsetChunkHeap(chunk, aCreateInfo.iMinLength, aCreateInfo.iOffset,
+ aCreateInfo.iGrowBy, maxLength, aCreateInfo.iAlign,
+ aCreateInfo.iSingleThread, mode);
+ chunk.Close();
+ }
+ else
+ {
+ h = OffsetChunkHeap(aCreateInfo.iChunk, aCreateInfo.iMinLength, aCreateInfo.iOffset,
+ aCreateInfo.iGrowBy, aCreateInfo.iMaxLength, aCreateInfo.iAlign,
+ aCreateInfo.iSingleThread, aCreateInfo.iMode);
+ }
+ return h;
+}
+
+
+
+EXPORT_C RHeap* UserHeap::ChunkHeap(const TDesC* aName, TInt aMinLength, TInt aMaxLength, TInt aGrowBy, TInt aAlign, TBool aSingleThread)
+/**
+Creates a heap in a local or global chunk.
+
+The chunk hosting the heap can be local or global.
+
+A local chunk is one which is private to the process creating it and is not
+intended for access by other user processes. A global chunk is one which is
+visible to all processes.
+
+The hosting chunk is local if the pointer aName is NULL; otherwise the
+hosting chunk is global and the descriptor *aName is assumed to contain
+the name to be assigned to it.
+
+Ownership of the host chunk is vested in the current process.
+
+A minimum and a maximum size for the heap can be specified. On successful
+return from this function, the size of the heap is at least aMinLength.
+If subsequent requests for allocation of memory from the heap cannot be
+satisfied by compressing the heap, the size of the heap is extended in
+increments of aGrowBy until the request can be satisfied. Attempts to extend
+the heap cause the size of the host chunk to be adjusted.
+
+Note that the size of the heap cannot be adjusted by more than aMaxLength.
+
+@param aName If NULL, the function constructs a local chunk to host
+ the heap. If not NULL, a pointer to a descriptor containing
+ the name to be assigned to the global chunk hosting the heap.
+@param aMinLength The minimum length of the heap in bytes. This will be
+ rounded up to the nearest page size by the allocator.
+@param aMaxLength The maximum length in bytes to which the heap can grow. This
+ will be rounded up to the nearest page size by the allocator.
+@param aGrowBy The number of bytes by which the heap will grow when more
+ memory is required. This will be rounded up to the nearest
+ page size by the allocator. If a value is not explicitly
+ specified, the page size is taken by default.
+@param aAlign From Symbian^4 onwards, this value is ignored but EABI 8
+ byte alignment is guaranteed for all allocations 8 bytes or
+ more in size. 4 byte allocations will be aligned to a 4
+ byte boundary. Best to pass in zero.
+@param aSingleThread EFalse if the heap is to be accessed from multiple threads.
+ This will cause internal locks to be created, guaranteeing
+ thread safety.
+
+@return A pointer to the new heap or NULL if the heap could not be created.
+
+@panic USER 41 if aMaxLength is < aMinLength.
+@panic USER 55 if aMinLength is negative.
+@panic USER 56 if aMaxLength is negative.
+*/
+ {
+ TInt page_size;
+ GET_PAGE_SIZE(page_size);
+ TInt minLength = _ALIGN_UP(aMinLength, page_size);
+ TInt maxLength = Max(aMaxLength, minLength);
+
+ TChunkHeapCreateInfo createInfo(minLength, maxLength);
+ createInfo.SetCreateChunk(aName);
+ createInfo.SetGrowBy(aGrowBy);
+ createInfo.SetAlignment(aAlign);
+ createInfo.SetSingleThread(aSingleThread);
+
+ return ChunkHeap(createInfo);
+ }
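+
+/*
+Illustrative usage sketch (not part of the original source): the overload above
+with aName == NULL gives a private heap in a local chunk, while a non-NULL name
+gives a heap in a global chunk visible to other processes. The sizes and name
+are arbitrary, and the remaining parameters are assumed to default as declared
+in the header.
+
+    RHeap* privateHeap = UserHeap::ChunkHeap(NULL, 0x1000, 0x100000);
+    _LIT(KSharedHeap, "SharedExampleHeap");
+    RHeap* sharedHeap  = UserHeap::ChunkHeap(&KSharedHeap(), 0x1000, 0x100000);
+*/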
+#endif // QT_SYMBIAN4_ALLOCATOR_UNWANTED_CODE
+
+EXPORT_C RHeap* UserHeap::ChunkHeap(RChunk aChunk, TInt aMinLength, TInt aGrowBy, TInt aMaxLength, TInt aAlign, TBool aSingleThread, TUint32 aMode)
+/**
+Creates a heap in an existing chunk.
+
+This function is intended to be used to create a heap in a user writable code
+chunk as created by a call to RChunk::CreateLocalCode(). This type of heap can
+be used to hold code fragments from a JIT compiler.
+
+@param aChunk The chunk that will host the heap.
+@param aMinLength The minimum length of the heap in bytes. This will be
+ rounded up to the nearest page size by the allocator.
+@param aGrowBy The number of bytes by which the heap will grow when more
+ memory is required. This will be rounded up to the nearest
+ page size by the allocator. If a value is not explicitly
+ specified, the page size is taken by default.
+@param aMaxLength The maximum length in bytes to which the heap can grow. This
+ will be rounded up to the nearest page size by the allocator.
+                     If 0 is passed in, the maximum length of the chunk is used.
+@param aAlign From Symbian^4 onwards, this value is ignored but EABI 8
+ byte alignment is guaranteed for all allocations 8 bytes or
+ more in size. 4 byte allocations will be aligned to a 4
+ byte boundary. Best to pass in zero.
+@param aSingleThread EFalse if the heap is to be accessed from multiple threads.
+ This will cause internal locks to be created, guaranteeing
+ thread safety.
+@param aMode Flags controlling the heap creation. See RAllocator::TFlags.
+
+@return A pointer to the new heap or NULL if the heap could not be created.
+
+@see UserHeap::OffsetChunkHeap()
+*/
+ {
+ return OffsetChunkHeap(aChunk, aMinLength, 0, aGrowBy, aMaxLength, aAlign, aSingleThread, aMode);
+ }
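+
+/*
+Illustrative usage sketch (not part of the original source): hosting a heap in a
+user-writable code chunk, e.g. for JIT-generated code fragments as mentioned
+above. The sizes are arbitrary and error handling is kept minimal.
+
+    RChunk codeChunk;
+    if (codeChunk.CreateLocalCode(0x1000, 0x100000) == KErrNone)
+        {
+        RHeap* codeHeap = UserHeap::ChunkHeap(codeChunk, 0x1000, 0x1000, 0x100000,
+                                              0, EFalse, EChunkHeapDuplicate);
+        codeChunk.Close();                   // safe: the heap duplicated the handle
+        }
+*/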
+
+EXPORT_C RHeap* UserHeap::OffsetChunkHeap(RChunk aChunk, TInt aMinLength, TInt aOffset, TInt aGrowBy, TInt aMaxLength, TInt aAlign, TBool aSingleThread, TUint32 aMode)
+/**
+Creates a heap in an existing chunk, offset from the beginning of the chunk.
+
+This function is intended to be used to create a heap using a chunk which has
+some of its memory already used, at the start of that chunk. The maximum
+length to which the heap can grow is the maximum size of the chunk, minus the
+data at the start of the chunk.
+
+The offset at which to create the heap is passed in as the aOffset parameter.
+Legacy heap implementations always respected the aOffset value; however, more
+modern heap implementations are more sophisticated and cannot necessarily respect
+this value. Therefore, if possible, you should always use an aOffset of 0 unless
+you have a very explicit requirement for a non-zero value. Using a non-zero value
+will result in a less efficient heap algorithm being used in order to respect
+the offset.
+
+Another issue to consider when using this function is the type of the chunk passed
+in. In order for the most efficient heap algorithms to be used, the chunk passed
+in should always be a disconnected chunk. Passing in a non-disconnected chunk will
+again result in a less efficient heap algorithm being used.
+
+Finally, another requirement for the most efficient heap algorithms to be used is
+for the heap to be able to expand. Therefore, unless you have a specific reason to
+do so, always specify aMaxLength > aMinLength.
+
+So, if possible, use aOffset == zero, aMaxLength > aMinLength and a disconnected
+chunk for best results!
+
+@param aChunk The chunk that will host the heap.
+@param aMinLength The minimum length of the heap in bytes. This will be
+ rounded up to the nearest page size by the allocator.
+@param aOffset The offset in bytes from the start of the chunk at which to
+ create the heap. If used (and it shouldn't really be!)
+ then it will be rounded up to a multiple of 8, to respect
+ EABI 8 byte alignment requirements.
+@param aGrowBy The number of bytes by which the heap will grow when more
+ memory is required. This will be rounded up to the nearest
+ page size by the allocator. If a value is not explicitly
+ specified, the page size is taken by default.
+@param aMaxLength The maximum length in bytes to which the heap can grow. This
+ will be rounded up to the nearest page size by the allocator.
+ If 0 is passed in, the maximum length of the chunk is used.
+@param aAlign From Symbian^4 onwards, this value is ignored but EABI 8
+ byte alignment is guaranteed for all allocations 8 bytes or
+ more in size. 4 byte allocations will be aligned to a 4
+ byte boundary. Best to pass in zero.
+@param aSingleThread EFalse if the heap is to be accessed from multiple threads.
+ This will cause internal locks to be created, guaranteeing
+ thread safety.
+@param aMode Flags controlling the heap creation. See RAllocator::TFlags.
+
+@return A pointer to the new heap or NULL if the heap could not be created.
+
+@panic USER 41 if aMaxLength is < aMinLength.
+@panic USER 55 if aMinLength is negative.
+@panic USER 56 if aMaxLength is negative.
+@panic USER 168 if aOffset is negative.
+*/
+ {
+ TBool dlOnly = EFalse;
+ TInt pageSize;
+ GET_PAGE_SIZE(pageSize);
+ TInt align = RHybridHeap::ECellAlignment; // Always use EABI 8 byte alignment
+
+ __ASSERT_ALWAYS(aMinLength>=0, ::Panic(ETHeapMinLengthNegative));
+ __ASSERT_ALWAYS(aMaxLength>=0, ::Panic(ETHeapMaxLengthNegative));
+
+ if ( aMaxLength > 0 )
+ __ASSERT_ALWAYS(aMaxLength>=aMinLength, ::Panic(ETHeapCreateMaxLessThanMin));
+
+ // Stick to EABI alignment for the start offset, if any
+ aOffset = _ALIGN_UP(aOffset, align);
+
+ // Using an aOffset > 0 means that we can't use the hybrid allocator and have to revert to Doug Lea only
+ if (aOffset > 0)
+ dlOnly = ETrue;
+
+ // Ensure that the minimum length is enough to hold the RHybridHeap object itself
+ TInt minCell = _ALIGN_UP(Max((TInt)RHybridHeap::EAllocCellSize, (TInt)RHybridHeap::EFreeCellSize), align);
+ TInt hybridHeapSize = (sizeof(RHybridHeap) + minCell);
+ if (aMinLength < hybridHeapSize)
+ aMinLength = hybridHeapSize;
+
+ // Round the minimum length up to a multiple of the page size, taking into account that the
+ // offset takes up a part of the chunk's memory
+ aMinLength = _ALIGN_UP((aMinLength + aOffset), pageSize);
+
+ // If aMaxLength is 0 then use the entire chunk
+ TInt chunkSize = aChunk.MaxSize();
+ if (aMaxLength == 0)
+ {
+ aMaxLength = chunkSize;
+ }
+ // Otherwise round the maximum length up to a multiple of the page size, taking into account that
+ // the offset takes up a part of the chunk's memory. We also clip the maximum length to the chunk
+ // size, so the user may get a little less than requested if the chunk size is not large enough
+ else
+ {
+ aMaxLength = _ALIGN_UP((aMaxLength + aOffset), pageSize);
+ if (aMaxLength > chunkSize)
+ aMaxLength = chunkSize;
+ }
+
+ // If the rounded up values don't make sense then a crazy aMinLength or aOffset must have been passed
+ // in, so fail the heap creation
+ if (aMinLength > aMaxLength)
+ return NULL;
+
+ // Adding the offset into the minimum and maximum length was only necessary for ensuring a good fit of
+ // the heap into the chunk. Re-adjust them now back to non offset relative sizes
+ aMinLength -= aOffset;
+ aMaxLength -= aOffset;
+
+    // If we are still creating the hybrid allocator (call parameter
+    // aOffset is 0 and aMaxLength > aMinLength), we must reduce the heap's
+    // aMaxLength to aMaxLength/2 and set aOffset to point to the middle of the chunk.
+ TInt offset = aOffset;
+ TInt maxLength = aMaxLength;
+ if (!dlOnly && (aMaxLength > aMinLength))
+ maxLength = offset = _ALIGN_UP(aMaxLength >> 1, pageSize);
+
+    // Try to use Commit() to map aMinLength bytes of physical memory for the heap, taking the offset into
+    // account. If that fails, assume the chunk is not a disconnected chunk and try to map the memory with
+    // Adjust() instead. In this case we also can't use the hybrid allocator and have to revert to Doug Lea only
+ TBool useAdjust = EFalse;
+ TInt r = aChunk.Commit(offset, aMinLength);
+ if (r == KErrGeneral)
+ {
+ dlOnly = useAdjust = ETrue;
+ r = aChunk.Adjust(aMinLength);
+ if (r != KErrNone)
+ return NULL;
+ }
+ else if (r == KErrNone)
+ {
+        // We have a disconnected chunk; reset aOffset and aMaxLength
+ aOffset = offset;
+ aMaxLength = maxLength;
+ }
+
+ else
+ return NULL;
+
+ // Parameters have been mostly verified and we know whether to use the hybrid allocator or Doug Lea only. The
+ // constructor for the hybrid heap will automatically drop back to Doug Lea if it determines that aMinLength
+ // == aMaxLength, so no need to worry about that requirement here. The user specified alignment is not used but
+ // is passed in so that it can be sanity checked in case the user is doing something totally crazy with it
+ RHybridHeap* h = new (aChunk.Base() + aOffset) RHybridHeap(aChunk.Handle(), aOffset, aMinLength, aMaxLength,
+ aGrowBy, aAlign, aSingleThread, dlOnly, useAdjust);
+
+ if (h->ConstructLock(aMode) != KErrNone)
+ return NULL;
+
+ // Return the heap address
+ return h;
+ }
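+
+/*
+Illustrative usage sketch (not part of the original source): reserving the first
+4 KB of a disconnected chunk for caller data and placing the heap after it. The
+sizes are arbitrary; as the documentation above notes, a non-zero offset forces
+the plain Doug Lea allocator.
+
+    RChunk chunk;
+    if (chunk.CreateDisconnectedLocal(0, 0, 0x200000) == KErrNone)
+        {
+        const TInt KReservedBytes = 0x1000;  // caller-owned prefix at the chunk base
+        RHeap* heap = UserHeap::OffsetChunkHeap(chunk, 0x1000, KReservedBytes,
+                                                0x1000, 0, 0, EFalse, EChunkHeapDuplicate);
+        chunk.Close();
+        }
+*/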
+
+#define UserTestDebugMaskBit(bit) (TBool)(UserSvr::DebugMask(bit>>5) & (1<<(bit&31)))
+
+_LIT(KLitDollarHeap,"$HEAP");
+EXPORT_C TInt UserHeap::CreateThreadHeap(SStdEpocThreadCreateInfo& aInfo, RHeap*& aHeap, TInt aAlign, TBool aSingleThread)
+/**
+@internalComponent
+*/
+//
+// Create a user-side heap
+//
+{
+ TInt page_size;
+ GET_PAGE_SIZE(page_size);
+ TInt minLength = _ALIGN_UP(aInfo.iHeapInitialSize, page_size);
+ TInt maxLength = Max(aInfo.iHeapMaxSize, minLength);
+#ifdef ENABLE_BTRACE
+ if (UserTestDebugMaskBit(96)) // 96 == KUSERHEAPTRACE in nk_trace.h
+ aInfo.iFlags |= ETraceHeapAllocs;
+#endif // ENABLE_BTRACE
+ // Create the thread's heap chunk.
+ RChunk c;
+#ifndef NO_NAMED_LOCAL_CHUNKS
+ TChunkCreateInfo createInfo;
+
+ createInfo.SetThreadHeap(0, maxLength, KLitDollarHeap()); // Initialise with no memory committed.
+#if USE_HYBRID_HEAP
+ //
+ // Create disconnected chunk for hybrid heap with double max length value
+ //
+ maxLength = 2*maxLength;
+ createInfo.SetDisconnected(0, 0, maxLength);
+#endif
+#ifdef SYMBIAN_WRITABLE_DATA_PAGING
+ // Set the paging policy of the heap chunk based on the thread's paging policy.
+ TUint pagingflags = aInfo.iFlags & EThreadCreateFlagPagingMask;
+ switch (pagingflags)
+ {
+ case EThreadCreateFlagPaged:
+ createInfo.SetPaging(TChunkCreateInfo::EPaged);
+ break;
+ case EThreadCreateFlagUnpaged:
+ createInfo.SetPaging(TChunkCreateInfo::EUnpaged);
+ break;
+ case EThreadCreateFlagPagingUnspec:
+ // Leave the chunk paging policy unspecified so the process's
+ // paging policy is used.
+ break;
+ }
+#endif // SYMBIAN_WRITABLE_DATA_PAGING
+
+ TInt r = c.Create(createInfo);
+#else
+ TInt r = c.CreateDisconnectedLocal(0, 0, maxLength * 2);
+#endif
+ if (r!=KErrNone)
+ return r;
+
+ aHeap = ChunkHeap(c, minLength, page_size, maxLength, aAlign, aSingleThread, EChunkHeapSwitchTo|EChunkHeapDuplicate);
+ c.Close();
+
+ if ( !aHeap )
+ return KErrNoMemory;
+
+#ifdef ENABLE_BTRACE
+ if (aInfo.iFlags & ETraceHeapAllocs)
+ {
+ aHeap->iFlags |= RHeap::ETraceAllocs;
+ BTraceContext8(BTrace::EHeap, BTrace::EHeapCreate,(TUint32)aHeap, RHybridHeap::EAllocCellSize);
+ TInt chunkId = ((RHandleBase&)((RHybridHeap*)aHeap)->iChunkHandle).BTraceId();
+ BTraceContext8(BTrace::EHeap, BTrace::EHeapChunkCreate, (TUint32)aHeap, chunkId);
+ }
+ if (aInfo.iFlags & EMonitorHeapMemory)
+ aHeap->iFlags |= RHeap::EMonitorMemory;
+#endif // ENABLE_BTRACE
+
+ return KErrNone;
+}
+
+#endif // __KERNEL_MODE__
+
+#endif /* QT_USE_NEW_SYMBIAN_ALLOCATOR */
diff --git a/src/corelib/arch/symbian/heap_hybrid_p.h b/src/corelib/arch/symbian/heap_hybrid_p.h
new file mode 100644
index 0000000000..1767fc1344
--- /dev/null
+++ b/src/corelib/arch/symbian/heap_hybrid_p.h
@@ -0,0 +1,406 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef __HEAP_HYBRID_H__
+#define __HEAP_HYBRID_H__
+
+#include <e32cmn.h>
+
+#ifdef __WINS__
+#define USE_HYBRID_HEAP 0
+#else
+#define USE_HYBRID_HEAP 1
+#endif
+
+// This stuff is all temporary in order to prevent having to include dla.h from heap_hybrid.h, which causes
+// problems due to its definition of size_t (and possibly other types). This is unfortunate but we cannot
+// pollute the namespace with these types or it will cause problems with Open C and other POSIX compatibility
+// efforts in Symbian
+
+#define NSMALLBINS (32U)
+#define NTREEBINS (32U)
+
+#ifndef MALLOC_ALIGNMENT
+ #define MALLOC_ALIGNMENT ((TUint)8U)
+#endif /* MALLOC_ALIGNMENT */
+
+#define CHUNK_OVERHEAD (sizeof(TUint))
+
+typedef unsigned int bindex_t;
+typedef unsigned int binmap_t;
+typedef struct malloc_chunk* mchunkptr;
+typedef struct malloc_segment msegment;
+typedef struct malloc_state* mstate;
+typedef struct malloc_tree_chunk* tbinptr;
+typedef struct malloc_tree_chunk* tchunkptr;
+
+struct malloc_segment {
+ TUint8* iBase; /* base address */
+ TUint iSize; /* allocated size */
+};
+
+struct malloc_state {
+ binmap_t iSmallMap;
+ binmap_t iTreeMap;
+ TUint iDvSize;
+ TUint iTopSize;
+ mchunkptr iDv;
+ mchunkptr iTop;
+ TUint iTrimCheck;
+ mchunkptr iSmallBins[(NSMALLBINS+1)*2];
+ tbinptr iTreeBins[NTREEBINS];
+ msegment iSeg;
+ };
+
+class RHybridHeap : public RHeap
+ {
+
+public:
+ // declarations copied from Symbian^4 RAllocator and RHeap
+ typedef void (*TWalkFunc)(TAny*, RHeap::TCellType, TAny*, TInt);
+ enum TFlags {ESingleThreaded=1, EFixedSize=2, ETraceAllocs=4, EMonitorMemory=8,};
+ enum TAllocDebugOp
+ {
+ ECount, EMarkStart, EMarkEnd, ECheck, ESetFail, ECopyDebugInfo, ESetBurstFail, EGetFail,
+ EGetSize=48, EGetMaxLength, EGetBase, EAlignInteger, EAlignAddr
+ };
+ enum TDebugOp { EWalk = 128, EHybridHeap };
+ enum THybridAllocFail
+ {
+ ERandom, ETrueRandom, EDeterministic, EHybridNone, EFailNext, EReset, EBurstRandom,
+ EBurstTrueRandom, EBurstDeterministic, EBurstFailNext, ECheckFailure,
+ };
+ enum { EDebugHdrSize = sizeof(SDebugCell) };
+#ifndef SYMBIAN_ENABLE_SPLIT_HEADERS
+ struct SRAllocatorBurstFail {TInt iBurst; TInt iRate; TInt iUnused[2];};
+#endif
+
+ struct HeapInfo
+ {
+ unsigned iFootprint;
+ unsigned iMaxSize;
+ unsigned iAllocBytes;
+ unsigned iAllocN;
+ unsigned iFreeBytes;
+ unsigned iFreeN;
+ };
+
+ struct SHeapCellInfo { RHybridHeap* iHeap; TInt iTotalAlloc; TInt iTotalAllocSize; TInt iTotalFree; TInt iLevelAlloc; SDebugCell* iStranded; };
+
+
+ /**
+ @internalComponent
+ */
+ enum TAllocatorType
+ {ESlabAllocator, EDougLeaAllocator, EPageAllocator, EFullSlab=0x80, EPartialFullSlab=0x40, EEmptySlab=0x20, ESlabSpare=0x10, ESlabMask=0xf0};
+
+
+ /**
+ @internalComponent
+ */
+ struct SWalkInfo {
+ /**
+        Address of the walk function to be called
+ */
+ TWalkFunc iFunction;
+
+ /**
+ The first parameter for callback function
+ */
+ TAny* iParam;
+ /**
+ Pointer to RHybridHeap object
+ */
+ RHybridHeap* iHeap;
+ };
+
+ /**
+ @internalComponent
+ */
+ struct SConfig {
+ /**
+        Required slab configuration (bit 0 = 4, bit 1 = 8, ...,
+        bit 13 = 56)
+ */
+ TUint32 iSlabBits;
+ /**
+ Delayed slab threshold in bytes (0 = no threshold)
+ */
+ TInt iDelayedSlabThreshold;
+ /**
+        2^n is the smallest size allocated in the paged allocator (valid values 14-31, i.e. 16 KB and upwards)
+ */
+ TInt iPagePower;
+
+ };
+
+ /**
+ @internalComponent
+
+ This structure is used by test code for configuring the allocators and obtaining information
+ from them in order to ensure they are behaving as required. This is internal test specific
+ code and is liable to be changed without warning at any time. You should under no circumstances
+ be using it!
+ */
+ struct STestCommand
+ {
+ TInt iCommand; // The test related command to be executed
+
+ union
+ {
+ SConfig iConfig; // Configuration used by test code only
+ TAny* iData; // Extra supporting data for the test command
+ };
+ };
+
+ /**
+ @internalComponent
+
+    Commands used by test code for configuring the allocators and obtaining information from them
+ */
+ enum TTestCommand { EGetConfig, ESetConfig, EHeapMetaData, ETestData };
+
+ virtual TAny* Alloc(TInt aSize);
+ virtual void Free(TAny* aPtr);
+ virtual TAny* ReAlloc(TAny* aPtr, TInt aSize, TInt aMode=0);
+ virtual TInt AllocLen(const TAny* aCell) const;
+#ifndef __KERNEL_MODE__
+ virtual TInt Compress();
+ virtual void Reset();
+ virtual TInt AllocSize(TInt& aTotalAllocSize) const;
+ virtual TInt Available(TInt& aBiggestBlock) const;
+#endif
+ virtual TInt DebugFunction(TInt aFunc, TAny* a1=NULL, TAny* a2=NULL);
+protected:
+ virtual TInt Extension_(TUint aExtensionId, TAny*& a0, TAny* a1);
+
+public:
+ TAny* operator new(TUint aSize, TAny* aBase) __NO_THROW;
+ void operator delete(TAny*, TAny*);
+
+private:
+ TInt DoCountAllocFree(TInt& aFree);
+ TInt DoCheckHeap(SCheckInfo* aInfo);
+ void DoMarkStart();
+ TUint32 DoMarkEnd(TInt aExpected);
+ void DoSetAllocFail(TAllocFail aType, TInt aRate);
+ TBool CheckForSimulatedAllocFail();
+ void DoSetAllocFail(TAllocFail aType, TInt aRate, TUint aBurst);
+
+ void Lock() const;
+ void Unlock() const;
+ TInt ChunkHandle() const;
+
+ RHybridHeap(TInt aChunkHandle, TInt aOffset, TInt aMinLength, TInt aMaxLength, TInt aGrowBy, TInt aAlign, TBool aSingleThread, TBool aDlOnly, TBool aUseAdjust);
+ RHybridHeap(TInt aMaxLength, TInt aAlign=0, TBool aSingleThread=ETrue);
+ RHybridHeap();
+
+ void Init(TInt aBitmapSlab, TInt aPagePower);
+ inline void InitBins(mstate m);
+ inline void InitTop(mstate m, mchunkptr p, TUint psize);
+ void* SysAlloc(mstate m, TUint nb);
+ int SysTrim(mstate m, TUint pad);
+ void* TmallocLarge(mstate m, TUint nb);
+ void* TmallocSmall(mstate m, TUint nb);
+ /*MACROS converted functions*/
+ static inline void UnlinkFirstSmallChunk(mstate M,mchunkptr B,mchunkptr P,bindex_t& I);
+ static inline void InsertSmallChunk(mstate M,mchunkptr P, TUint S);
+ static inline void InsertChunk(mstate M,mchunkptr P,TUint S);
+ static inline void UnlinkLargeChunk(mstate M,tchunkptr X);
+ static inline void UnlinkSmallChunk(mstate M, mchunkptr P,TUint S);
+ static inline void UnlinkChunk(mstate M, mchunkptr P, TUint S);
+ static inline void ComputeTreeIndex(TUint S, bindex_t& I);
+ static inline void InsertLargeChunk(mstate M,tchunkptr X,TUint S);
+ static inline void ReplaceDv(mstate M, mchunkptr P, TUint S);
+ static inline void ComputeBit2idx(binmap_t X,bindex_t& I);
+
+ void DoComputeTreeIndex(TUint S, bindex_t& I);
+ void DoCheckAnyChunk(mstate m, mchunkptr p);
+ void DoCheckTopChunk(mstate m, mchunkptr p);
+ void DoCheckInuseChunk(mstate m, mchunkptr p);
+ void DoCheckFreeChunk(mstate m, mchunkptr p);
+ void DoCheckMallocedChunk(mstate m, void* mem, TUint s);
+ void DoCheckTree(mstate m, tchunkptr t);
+ void DoCheckTreebin(mstate m, bindex_t i);
+ void DoCheckSmallbin(mstate m, bindex_t i);
+ TInt BinFind(mstate m, mchunkptr x);
+ TUint TraverseAndCheck(mstate m);
+ void DoCheckMallocState(mstate m);
+
+ TInt GetInfo(struct HeapInfo* i, SWalkInfo* wi=NULL) const;
+ void InitDlMalloc(TUint capacity, int locked);
+ void* DlMalloc(TUint);
+ void DlFree(void*);
+ void* DlRealloc(void*, TUint, TInt);
+ TUint DlInfo(struct HeapInfo* i, SWalkInfo* wi) const;
+ void DoCheckCommittedSize(TInt aNPages, mstate aM);
+
+ TAny* ReAllocImpl(TAny* aPtr, TInt aSize, TInt aMode);
+ void Construct(TBool aSingleThread, TBool aDLOnly, TBool aUseAdjust, TInt aAlign);
+#ifndef __KERNEL_MODE__
+ TInt ConstructLock(TUint32 aMode);
+#endif
+ static void Walk(SWalkInfo* aInfo, TAny* aBfr, TInt aLth, TCellType aBfrType, TAllocatorType aAlloctorType);
+ static void WalkCheckCell(TAny* aPtr, TCellType aType, TAny* aCell, TInt aLen);
+ void* Map(void* p, TInt sz);
+ void Unmap(void* p,TInt sz);
+
+private:
+ TInt iMinLength;
+ TInt iOffset; // offset of RHeap object from chunk base
+ TInt iGrowBy;
+ TInt iMinCell;
+ TInt iPageSize;
+
+ // Temporarily commented out and exported from RHeap to prevent source breaks from req417-52840.
+ // This will be moved with another REQ after submission and subsequent fixing of bad code
+ //TInt iNestingLevel;
+ TInt iAllocCount;
+ // Temporarily commented out. See comment above regarding req417-52840 source breaks
+ //TAllocFail iFailType;
+ TInt iFailRate;
+ TBool iFailed;
+ TInt iFailAllocCount;
+ TInt iRand;
+ // Temporarily commented out. See comment above regarding req417-52840 source breaks
+ //TAny* iTestData;
+
+ TInt iChunkSize;
+ TInt iHighWaterMark;
+ TBool iUseAdjust;
+ TBool iDLOnly;
+
+ malloc_state iGlobalMallocState;
+
+#ifdef __KERNEL_MODE__
+
+ friend class RHeapK;
+
+#else
+
+ friend class UserHeap;
+ friend class HybridHeap;
+ friend class TestHybridHeap;
+
+private:
+
+ static void TreeRemove(slab* s);
+ static void TreeInsert(slab* s,slab** r);
+
+ enum {EOkBits = (1<<(MAXSLABSIZE>>2))-1};
+
+ void SlabInit();
+ void SlabConfig(unsigned slabbitmap);
+ void* SlabAllocate(slabset& allocator);
+ void SlabFree(void* p);
+ void* AllocNewSlab(slabset& allocator);
+ void* AllocNewPage(slabset& allocator);
+ void* InitNewSlab(slabset& allocator, slab* s);
+ void FreeSlab(slab* s);
+ void FreePage(page* p);
+ void SlabInfo(struct HeapInfo* i, SWalkInfo* wi) const;
+ static void SlabFullInfo(slab* s, struct HeapInfo* i, SWalkInfo* wi);
+ static void SlabPartialInfo(slab* s, struct HeapInfo* i, SWalkInfo* wi);
+ static void SlabEmptyInfo(slab* s, struct HeapInfo* i, SWalkInfo* wi);
+ static void TreeWalk(slab* const* root, void (*f)(slab*, struct HeapInfo*, SWalkInfo*), struct HeapInfo* i, SWalkInfo* wi);
+
+ static void WalkPartialFullSlab(SWalkInfo* aInfo, slab* aSlab, TCellType aBfrType, TInt aLth);
+ static void WalkFullSlab(SWalkInfo* aInfo, slab* aSlab, TCellType aBfrType, TInt aLth);
+ void DoCheckSlab(slab* aSlab, TAllocatorType aSlabType, TAny* aBfr=NULL);
+ void DoCheckSlabTrees();
+ void DoCheckSlabTree(slab** aS, TBool aPartialPage);
+ void BuildPartialSlabBitmap(TUint32* aBitmap, slab* aSlab, TAny* aBfr=NULL);
+
+ static inline unsigned SlabHeaderFree(unsigned h)
+ {return (h&0x000000ff);}
+ static inline unsigned SlabHeaderPagemap(unsigned h)
+ {return (h&0x00000f00)>>8;}
+ static inline unsigned SlabHeaderSize(unsigned h)
+ {return (h&0x0003f000)>>12;}
+ static inline unsigned SlabHeaderUsedm4(unsigned h)
+ {return (h&0x0ffc0000)>>18;}
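+    // Worked example (illustrative, not from the original source): the slab header
+    // packs four fields into one word. For h = 0x000CC502 the decoders above give
+    // SlabHeaderFree(h) == 2, SlabHeaderPagemap(h) == 5, SlabHeaderSize(h) == 12 and
+    // SlabHeaderUsedm4(h) == 3, i.e. the masks select bits [7:0], [11:8], [17:12]
+    // and [27:18] respectively.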
+ /***paged allocator code***/
+ void PagedInit(TInt aPagePower);
+ void* PagedAllocate(unsigned size);
+ void PagedFree(void* p);
+ void* PagedReallocate(void* p, unsigned size, TInt mode);
+
+ bool PagedEncode(unsigned pos, unsigned npage);
+ unsigned PagedDecode(unsigned pos) const;
+ inline unsigned PagedSize(void* p) const;
+ inline bool PagedSetSize(void* p, unsigned size);
+ inline void PagedZapSize(void* p, unsigned size);
+ inline void* Bitmap2addr(unsigned pos) const;
+ void PagedInfo(struct HeapInfo* i, SWalkInfo* wi) const;
+ void ResetBitmap();
+ TBool CheckBitmap(void* aBfr, TInt aSize, TUint32& aDummy, TInt& aNPages);
+
+private:
+ paged_bitmap iPageMap; // bitmap representing page allocator's pages
+ TUint8* iMemBase; // bottom of paged/slab memory (chunk base)
+ TUint8 iBitMapBuffer[MAXSMALLPAGEBITS>>3]; // buffer for initial page bitmap
+ TInt iSlabThreshold; // allocations < than this are done by the slab allocator
+ TInt iPageThreshold; // 2^n is smallest cell size allocated in paged allocator
+ TInt iSlabInitThreshold; // slab allocator will be used after chunk reaches this size
+ TUint32 iSlabConfigBits; // set of bits that specify which slab sizes to use
+ slab* iPartialPage; // partial-use page tree
+ slab* iFullSlab; // full slabs list (so we can find them when walking)
+ page* iSparePage; // cached, to avoid kernel exec calls for unmapping/remapping
+    TUint8 iSizeMap[(MAXSLABSIZE>>2)+1]; // maps a size class to an index into iSlabAlloc
+    slabset iSlabAlloc[MAXSLABSIZE>>2]; // array of slabsets, one per size class
+
+#endif // __KERNEL_MODE__
+};
+
+#define HEAP_ASSERT(x) __ASSERT_DEBUG(x, HEAP_PANIC(ETHeapBadCellAddress))
+
+template <class T> inline T Floor(const T addr, unsigned aln)
+{return T((unsigned(addr))&~(aln-1));}
+template <class T> inline T Ceiling(T addr, unsigned aln)
+{return T((unsigned(addr)+(aln-1))&~(aln-1));}
+template <class T> inline unsigned LowBits(T addr, unsigned aln)
+{return unsigned(addr)&(aln-1);}
+template <class T1, class T2> inline int PtrDiff(const T1* a1, const T2* a2)
+{return reinterpret_cast<const unsigned char*>(a1) - reinterpret_cast<const unsigned char*>(a2);}
+template <class T> inline T Offset(T addr, unsigned ofs)
+{return T(unsigned(addr)+ofs);}
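+
+/*
+Worked example (illustrative, not from the original source) of the alignment
+helpers above, on a 32-bit address:
+
+    TUint8* p    = reinterpret_cast<TUint8*>(0x1003);
+    TUint8* down = Floor(p, 8);      // 0x1000: rounded down to an 8-byte boundary
+    TUint8* up   = Ceiling(p, 8);    // 0x1008: rounded up to an 8-byte boundary
+    unsigned rem = LowBits(p, 8);    // 0x3:    offset within the 8-byte unit
+*/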
+
+#endif //__HEAP_HYBRID_H__
diff --git a/src/corelib/arch/symbian/page_alloc_p.h b/src/corelib/arch/symbian/page_alloc_p.h
new file mode 100644
index 0000000000..f92bd332ff
--- /dev/null
+++ b/src/corelib/arch/symbian/page_alloc_p.h
@@ -0,0 +1,68 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef __KERNEL_MODE__
+
+const int MAXSMALLPAGEBITS = 68<<3;
+#define MINPAGEPOWER PAGESHIFT+2
+
+struct paged_bitmap
+{
+ public:
+ inline paged_bitmap() : iBase(0), iNbits(0) {}
+ void Init(unsigned char* p, unsigned size, unsigned bit);
+//
+ inline unsigned char* Addr() const;
+ inline unsigned Size() const;
+//
+ inline void Set(unsigned ix, unsigned bit);
+ inline unsigned operator[](unsigned ix) const;
+ bool Is(unsigned ix, unsigned len, unsigned bit) const;
+ void Set(unsigned ix, unsigned len, unsigned val);
+ void Setn(unsigned ix, unsigned len, unsigned bit);
+ unsigned Bits(unsigned ix, unsigned len) const; // little endian
+ int Find(unsigned start, unsigned bit) const;
+ private:
+ unsigned char* iBase;
+ unsigned iNbits;
+};
+
+#endif // __KERNEL_MODE__
diff --git a/src/corelib/arch/symbian/qatomic_generic_armv6.cpp b/src/corelib/arch/symbian/qatomic_generic_armv6.cpp
new file mode 100644
index 0000000000..f9ce6fd154
--- /dev/null
+++ b/src/corelib/arch/symbian/qatomic_generic_armv6.cpp
@@ -0,0 +1,472 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+** This file implements the generic atomics interface using ARMv6 assembly
+** instructions. It is more efficient than the inline versions when Qt is
+** built for the THUMB instruction set, as the required instructions are
+** only available in ARM state.
+****************************************************************************/
+
+#include <QtCore/qglobal.h>
+
+#ifdef QT_HAVE_ARMV6
+#ifndef SYMBIAN_E32_ATOMIC_API
+
+QT_BEGIN_NAMESPACE
+
+QT_USE_NAMESPACE
+
+#ifdef Q_CC_RVCT
+#pragma push
+#pragma arm
+Q_CORE_EXPORT asm
+bool QBasicAtomicInt_testAndSetRelaxed(volatile int *_q_value, int expectedValue, int newValue)
+{
+ CODE32
+ //fall through
+}
+Q_CORE_EXPORT asm
+bool QBasicAtomicInt_testAndSetAcquire(volatile int *_q_value, int expectedValue, int newValue)
+{
+ CODE32
+ //fall through
+}
+Q_CORE_EXPORT asm
+bool QBasicAtomicInt_testAndSetRelease(volatile int *_q_value, int expectedValue, int newValue)
+{
+ CODE32
+ //fall through
+}
+Q_CORE_EXPORT asm
+bool QBasicAtomicInt_testAndSetOrdered(volatile int *_q_value, int expectedValue, int newValue)
+{
+ CODE32
+ //R0 = _q_value
+ //R1 = expectedValue
+ //R2 = newValue
+retry_testAndSetOrdered
+ LDREX r3,[r0] //r3 = *_q_value
+ EORS r3,r3,r1 //if (r3 == expectedValue) {
+ STREXEQ r3,r2,[r0] //*_q_value = newvalue, r3 = error
+ TEQEQ r3,#1 //if error
+ BEQ retry_testAndSetOrdered //then goto retry }
+ RSBS r0,r3,#1 //return (r3 == 0)
+ MOVCC r0,#0
+ BX r14
+}
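+
+// Rough C equivalent of the LDREX/STREX retry loop above (illustrative only;
+// written out like this it would not be atomic, and exclusive_store() is a
+// hypothetical stand-in for the STREX instruction):
+//
+//     for (;;) {
+//         int current = *_q_value;              // LDREX marks the address exclusive
+//         if (current != expectedValue)
+//             return false;
+//         if (exclusive_store(_q_value, newValue))
+//             return true;                      // STREX succeeded
+//         // otherwise another writer touched the location; retry
+//     }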
+
+Q_CORE_EXPORT asm
+int QBasicAtomicInt_fetchAndStoreRelaxed(volatile int *_q_value, int newValue)
+{
+ CODE32
+ //fall through
+}
+Q_CORE_EXPORT asm
+int QBasicAtomicInt_fetchAndStoreAcquire(volatile int *_q_value, int newValue)
+{
+ CODE32
+ //fall through
+}
+Q_CORE_EXPORT asm
+int QBasicAtomicInt_fetchAndStoreRelease(volatile int *_q_value, int newValue)
+{
+ CODE32
+ //fall through
+}
+Q_CORE_EXPORT asm
+int QBasicAtomicInt_fetchAndStoreOrdered(volatile int *_q_value, int newValue)
+{
+ CODE32
+//R0 = _q_value
+//R1 = newValue
+retry_fetchAndStoreOrdered
+ LDREX r3,[r0] //r3 = *_q_value
+ STREX r2,r1,[r0] //*_q_value = newValue, r2 = error
+ TEQ r2,#0 //if error
+ BNE retry_fetchAndStoreOrdered //then goto retry
+ MOV r0,r3 //return r3
+ BX r14
+}
+
+Q_CORE_EXPORT asm
+int QBasicAtomicInt_fetchAndAddRelaxed(volatile int *_q_value, int valueToAdd)
+{
+ CODE32
+ //fall through
+}
+Q_CORE_EXPORT asm
+int QBasicAtomicInt_fetchAndAddAcquire(volatile int *_q_value, int valueToAdd)
+{
+ CODE32
+ //fall through
+}
+Q_CORE_EXPORT asm
+int QBasicAtomicInt_fetchAndAddRelease(volatile int *_q_value, int valueToAdd)
+{
+ CODE32
+ //fall through
+}
+Q_CORE_EXPORT asm
+int QBasicAtomicInt_fetchAndAddOrdered(volatile int *_q_value, int valueToAdd)
+{
+ CODE32
+ //R0 = _q_value
+ //R1 = valueToAdd
+ STMDB sp!,{r12,lr}
+retry_fetchAndAddOrdered
+ LDREX r2,[r0] //r2 = *_q_value
+ ADD r3,r2,r1 //r3 = r2 + r1
+ STREX r12,r3,[r0] //*_q_value = r3, r12 = error
+ TEQ r12,#0 //if error
+ BNE retry_fetchAndAddOrdered //then retry
+ MOV r0,r2 //return r2
+ LDMIA sp!,{r12,pc}
+}
+
+Q_CORE_EXPORT asm
+bool QBasicAtomicPointer_testAndSetRelaxed(void * volatile *_q_value,
+ void *expectedValue,
+ void *newValue)
+{
+ CODE32
+ //fall through
+}
+Q_CORE_EXPORT asm
+bool QBasicAtomicPointer_testAndSetRelease(void * volatile *_q_value,
+ void *expectedValue,
+ void *newValue)
+{
+ CODE32
+ //fall through
+}
+Q_CORE_EXPORT asm
+bool QBasicAtomicPointer_testAndSetAcquire(void * volatile *_q_value,
+ void *expectedValue,
+ void *newValue)
+{
+ CODE32
+ //fall through
+}
+Q_CORE_EXPORT asm
+bool QBasicAtomicPointer_testAndSetOrdered(void * volatile *_q_value,
+ void *expectedValue,
+ void *newValue)
+{
+ CODE32
+ //R0 = _q_value
+ //R1 = expectedValue
+ //R2 = newValue
+retryPointer_testAndSetOrdered
+ LDREX r3,[r0] //r3 = *_q_value
+ EORS r3,r3,r1 //if (r3 == expectedValue) {
+ STREXEQ r3,r2,[r0] //*_q_value = newvalue, r3 = error
+ TEQEQ r3,#1 //if error
+ BEQ retryPointer_testAndSetOrdered //then goto retry }
+ RSBS r0,r3,#1 //return (r3 == 0)
+ MOVCC r0,#0
+ BX r14
+}
+
+Q_CORE_EXPORT asm
+void *QBasicAtomicPointer_fetchAndStoreRelaxed(void * volatile *_q_value, void *newValue)
+{
+ CODE32
+ //fall through
+}
+Q_CORE_EXPORT asm
+void *QBasicAtomicPointer_fetchAndStoreAcquire(void * volatile *_q_value, void *newValue)
+{
+ CODE32
+ //fall through
+}
+Q_CORE_EXPORT asm
+void *QBasicAtomicPointer_fetchAndStoreRelease(void * volatile *_q_value, void *newValue)
+{
+ CODE32
+ //fall through
+}
+Q_CORE_EXPORT asm
+void *QBasicAtomicPointer_fetchAndStoreOrdered(void * volatile *_q_value, void *newValue)
+{
+ CODE32
+ //R0 = _q_value
+ //R1 = newValue
+retryPointer_fetchAndStoreOrdered
+ LDREX r3,[r0] //r3 = *_q_value
+ STREX r2,r1,[r0] //*_q_value = newValue, r2 = error
+ TEQ r2,#0 //if error
+ BNE retryPointer_fetchAndStoreOrdered //then goto retry
+ MOV r0,r3 //return r3
+ BX r14
+}
+
+Q_CORE_EXPORT asm
+void *QBasicAtomicPointer_fetchAndAddRelaxed(void * volatile *_q_value, qptrdiff valueToAdd)
+{
+ CODE32
+ //fall through
+}
+Q_CORE_EXPORT asm
+void *QBasicAtomicPointer_fetchAndAddRelease(void * volatile *_q_value, qptrdiff valueToAdd)
+{
+ CODE32
+ //fall through
+}
+Q_CORE_EXPORT asm
+void *QBasicAtomicPointer_fetchAndAddAcquire(void * volatile *_q_value, qptrdiff valueToAdd)
+{
+ CODE32
+ //fall through
+}
+Q_CORE_EXPORT asm
+void *QBasicAtomicPointer_fetchAndAddOrdered(void * volatile *_q_value, qptrdiff valueToAdd)
+{
+ CODE32
+ //R0 = _q_value
+ //R1 = valueToAdd
+ STMDB sp!,{r12,lr}
+retryPointer_fetchAndAddOrdered
+ LDREX r2,[r0] //r2 = *_q_value
+ ADD r3,r2,r1 //r3 = r2 + r1
+ STREX r12,r3,[r0] //*_q_value = r3, r12 = error
+ TEQ r12,#0 //if error
+ BNE retryPointer_fetchAndAddOrdered //then retry
+ MOV r0,r2 //return r2
+ LDMIA sp!,{r12,pc}
+}
+
+#pragma pop
+#elif defined (Q_CC_GCCE)
+Q_CORE_EXPORT __declspec( naked )
+bool QBasicAtomicInt_testAndSetRelaxed(volatile int *_q_value, int expectedValue, int newValue)
+{
+ //fall through
+}
+Q_CORE_EXPORT __declspec( naked )
+bool QBasicAtomicInt_testAndSetAcquire(volatile int *_q_value, int expectedValue, int newValue)
+{
+ //fall through
+}
+Q_CORE_EXPORT __declspec( naked )
+bool QBasicAtomicInt_testAndSetRelease(volatile int *_q_value, int expectedValue, int newValue)
+{
+ //fall through
+}
+Q_CORE_EXPORT __declspec( naked )
+bool QBasicAtomicInt_testAndSetOrdered(volatile int *_q_value, int expectedValue, int newValue)
+{
+ //R0 = _q_value
+ //R1 = expectedValue
+ //R2 = newValue
+ asm("retry_testAndSetOrdered:");
+ asm(" LDREX r3,[r0]"); //r3 = *_q_value
+ asm(" EORS r3,r3,r1"); //if (r3 == expectedValue) {
+ asm(" STREXEQ r3,r2,[r0]"); //*_q_value = newvalue, r3 = error
+ asm(" TEQEQ r3,#1"); //if error
+ asm(" BEQ retry_testAndSetOrdered"); //then goto retry }
+ asm(" RSBS r0,r3,#1"); //return (r3 == 0)
+ asm(" MOVCC r0,#0");
+ asm(" BX r14");
+}
+
+Q_CORE_EXPORT __declspec( naked )
+int QBasicAtomicInt_fetchAndStoreRelaxed(volatile int *_q_value, int newValue)
+{
+ //fall through
+}
+Q_CORE_EXPORT __declspec( naked )
+int QBasicAtomicInt_fetchAndStoreAcquire(volatile int *_q_value, int newValue)
+{
+ //fall through
+}
+Q_CORE_EXPORT __declspec( naked )
+int QBasicAtomicInt_fetchAndStoreRelease(volatile int *_q_value, int newValue)
+{
+ //fall through
+}
+Q_CORE_EXPORT __declspec( naked )
+int QBasicAtomicInt_fetchAndStoreOrdered(volatile int *_q_value, int newValue)
+{
+//R0 = _q_value
+//R1 = newValue
+ asm("retry_fetchAndStoreOrdered:");
+ asm(" LDREX r3,[r0]"); //r3 = *_q_value
+ asm(" STREX r2,r1,[r0]"); //*_q_value = newValue, r2 = error
+ asm(" TEQ r2,#0"); //if error
+ asm(" BNE retry_fetchAndStoreOrdered"); //then goto retry
+ asm(" MOV r0,r3"); //return r3
+ asm(" BX r14");
+}
+
+Q_CORE_EXPORT __declspec( naked )
+int QBasicAtomicInt_fetchAndAddRelaxed(volatile int *_q_value, int valueToAdd)
+{
+ //fall through
+}
+Q_CORE_EXPORT __declspec( naked )
+int QBasicAtomicInt_fetchAndAddAcquire(volatile int *_q_value, int valueToAdd)
+{
+ //fall through
+}
+Q_CORE_EXPORT __declspec( naked )
+int QBasicAtomicInt_fetchAndAddRelease(volatile int *_q_value, int valueToAdd)
+{
+ //fall through
+}
+Q_CORE_EXPORT __declspec( naked )
+int QBasicAtomicInt_fetchAndAddOrdered(volatile int *_q_value, int valueToAdd)
+{
+ //R0 = _q_value
+ //R1 = valueToAdd
+ asm(" STMDB sp!,{r12,lr}");
+ asm("retry_fetchAndAddOrdered:");
+ asm(" LDREX r2,[r0]"); //r2 = *_q_value
+ asm(" ADD r3,r2,r1 "); //r3 = r2 + r1
+ asm(" STREX r12,r3,[r0]"); //*_q_value = r3, r12 = error
+ asm(" TEQ r12,#0"); //if error
+ asm(" BNE retry_fetchAndAddOrdered"); //then retry
+ asm(" MOV r0,r2"); //return r2
+ asm(" LDMIA sp!,{r12,pc}");
+}
+
+Q_CORE_EXPORT __declspec( naked )
+bool QBasicAtomicPointer_testAndSetRelaxed(void * volatile *_q_value,
+ void *expectedValue,
+ void *newValue)
+{
+ //fall through
+}
+Q_CORE_EXPORT __declspec( naked )
+bool QBasicAtomicPointer_testAndSetRelease(void * volatile *_q_value,
+ void *expectedValue,
+ void *newValue)
+{
+ //fall through
+}
+Q_CORE_EXPORT __declspec( naked )
+bool QBasicAtomicPointer_testAndSetAcquire(void * volatile *_q_value,
+ void *expectedValue,
+ void *newValue)
+{
+ //fall through
+}
+Q_CORE_EXPORT __declspec( naked )
+bool QBasicAtomicPointer_testAndSetOrdered(void * volatile *_q_value,
+ void *expectedValue,
+ void *newValue)
+{
+ //R0 = _q_value
+ //R1 = expectedValue
+ //R2 = newValue
+ asm("retryPointer_testAndSetOrdered:");
+ asm(" LDREX r3,[r0]"); //r3 = *_q_value
+ asm(" EORS r3,r3,r1"); //if (r3 == expectedValue) {
+ asm(" STREXEQ r3,r2,[r0]"); //*_q_value = newvalue, r3 = error
+ asm(" TEQEQ r3,#1"); //if error
+ asm(" BEQ retryPointer_testAndSetOrdered"); //then goto retry }
+ asm(" RSBS r0,r3,#1"); //return (r3 == 0)
+ asm(" MOVCC r0,#0");
+ asm(" BX r14");
+}
+
+Q_CORE_EXPORT __declspec( naked )
+void *QBasicAtomicPointer_fetchAndStoreRelaxed(void * volatile *_q_value, void *newValue)
+{
+ //fall through
+}
+Q_CORE_EXPORT __declspec( naked )
+void *QBasicAtomicPointer_fetchAndStoreAcquire(void * volatile *_q_value, void *newValue)
+{
+ //fall through
+}
+Q_CORE_EXPORT __declspec( naked )
+void *QBasicAtomicPointer_fetchAndStoreRelease(void * volatile *_q_value, void *newValue)
+{
+ //fall through
+}
+Q_CORE_EXPORT __declspec( naked )
+void *QBasicAtomicPointer_fetchAndStoreOrdered(void * volatile *_q_value, void *newValue)
+{
+ //R0 = _q_value
+ //R1 = newValue
+ asm("retryPointer_fetchAndStoreOrdered:");
+ asm(" LDREX r3,[r0]"); //r3 = *_q_value
+ asm(" STREX r2,r1,[r0]"); //*_q_value = newValue, r2 = error
+ asm(" TEQ r2,#0"); //if error
+ asm(" BNE retryPointer_fetchAndStoreOrdered"); //then goto retry
+ asm(" MOV r0,r3"); //return r3
+ asm(" BX r14");
+}
+
+Q_CORE_EXPORT __declspec( naked )
+void *QBasicAtomicPointer_fetchAndAddRelaxed(void * volatile *_q_value, qptrdiff valueToAdd)
+{
+ //fall through
+}
+Q_CORE_EXPORT __declspec( naked )
+void *QBasicAtomicPointer_fetchAndAddRelease(void * volatile *_q_value, qptrdiff valueToAdd)
+{
+ //fall through
+}
+Q_CORE_EXPORT __declspec( naked )
+void *QBasicAtomicPointer_fetchAndAddAcquire(void * volatile *_q_value, qptrdiff valueToAdd)
+{
+ //fall through
+}
+Q_CORE_EXPORT __declspec( naked )
+void *QBasicAtomicPointer_fetchAndAddOrdered(void * volatile *_q_value, qptrdiff valueToAdd)
+{
+ //R0 = _q_value
+ //R1 = valueToAdd
+ asm(" STMDB sp!,{r12,lr}");
+ asm("retryPointer_fetchAndAddOrdered:");
+ asm(" LDREX r2,[r0]"); //r2 = *_q_value
+ asm(" ADD r3,r2,r1"); //r3 = r2 + r1
+ asm(" STREX r12,r3,[r0]"); //*_q_value = r3, r12 = error
+ asm(" TEQ r12,#0"); //if error
+ asm(" BNE retryPointer_fetchAndAddOrdered"); //then retry
+ asm(" MOV r0,r2"); //return r2
+ asm(" LDMIA sp!,{r12,pc}");
+}
+#else
+#error unknown arm compiler
+#endif
+QT_END_NAMESPACE
+#endif
+#endif
diff --git a/src/corelib/arch/symbian/qatomic_symbian.cpp b/src/corelib/arch/symbian/qatomic_symbian.cpp
new file mode 100644
index 0000000000..9b997e1a70
--- /dev/null
+++ b/src/corelib/arch/symbian/qatomic_symbian.cpp
@@ -0,0 +1,527 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include <QtCore/qglobal.h>
+#include <QtCore/qatomic.h>
+
+#ifdef SYMBIAN_E32_ATOMIC_API
+#include <e32atomics.h>
+#endif
+
+#include <e32debug.h>
+
+QT_BEGIN_NAMESPACE
+
+// Heap and handle info printer.
+// This way we can report on heap cells and handles that are not owned by anything which still exists.
+// This information can be used to detect memory leaks, particularly if these numbers grow as the app is used more.
+// This code is placed here because that happens to make it the very last static to be destroyed in a Qt app.
+// The assumed reason is that this file appears before any other file declaring static data in the generated
+// Symbian MMP file; this particular file was chosen as it is the earliest Symbian-specific file.
+struct QSymbianPrintExitInfo
+{
+ QSymbianPrintExitInfo()
+ {
+ RThread().HandleCount(initProcessHandleCount, initThreadHandleCount);
+ initCells = User::CountAllocCells();
+ }
+ ~QSymbianPrintExitInfo()
+ {
+ RProcess myProc;
+ TFullName fullName = myProc.FileName();
+ TInt cells = User::CountAllocCells();
+ TInt processHandleCount=0;
+ TInt threadHandleCount=0;
+ RThread().HandleCount(processHandleCount, threadHandleCount);
+ RDebug::Print(_L("%S exiting with %d allocated cells, %d handles"),
+ &fullName,
+ cells - initCells,
+ (processHandleCount + threadHandleCount) - (initProcessHandleCount + initThreadHandleCount));
+ }
+ TInt initCells;
+ TInt initProcessHandleCount;
+ TInt initThreadHandleCount;
+} symbian_printExitInfo;
+
+Q_CORE_EXPORT bool QBasicAtomicInt::isReferenceCountingNative()
+{
+#if !defined(SYMBIAN_E32_ATOMIC_API) && defined(QT_HAVE_ARMV6)
+ return true;
+#else
+ return false;
+#endif
+}
+
+Q_CORE_EXPORT bool QBasicAtomicInt::isTestAndSetNative()
+{
+#if !defined(SYMBIAN_E32_ATOMIC_API) && defined(QT_HAVE_ARMV6)
+ return true;
+#else
+ return false;
+#endif
+}
+
+Q_CORE_EXPORT bool QBasicAtomicInt::isFetchAndStoreNative()
+{
+#if !defined(SYMBIAN_E32_ATOMIC_API) && defined(QT_HAVE_ARMV6)
+ return true;
+#else
+ return false;
+#endif
+}
+
+Q_CORE_EXPORT bool QBasicAtomicInt::isFetchAndAddNative()
+{
+#if !defined(SYMBIAN_E32_ATOMIC_API) && defined(QT_HAVE_ARMV6)
+ return true;
+#else
+ return false;
+#endif
+}
+
+Q_CORE_EXPORT bool QBasicAtomicPointer_isTestAndSetNative()
+{
+#if !defined(SYMBIAN_E32_ATOMIC_API) && defined(QT_HAVE_ARMV6)
+ return true;
+#else
+ return false;
+#endif
+}
+
+Q_CORE_EXPORT bool QBasicAtomicPointer_isFetchAndStoreNative()
+{
+#if !defined(SYMBIAN_E32_ATOMIC_API) && defined(QT_HAVE_ARMV6)
+ return true;
+#else
+ return false;
+#endif
+}
+
+Q_CORE_EXPORT bool QBasicAtomicPointer_isFetchAndAddNative()
+{
+#if !defined(SYMBIAN_E32_ATOMIC_API) && defined(QT_HAVE_ARMV6)
+ return true;
+#else
+ return false;
+#endif
+}
+
+#ifdef SYMBIAN_E32_ATOMIC_API
+//Symbian's API is SMP-safe when using an SMP kernel, and cheap when using a uniprocessor kernel
+
+//generate compiler error if casting assumptions are wrong (symbian64?)
+__ASSERT_COMPILE(sizeof(int) == sizeof(TUint32));
+__ASSERT_COMPILE(sizeof(void *) == sizeof(TUint32));
+
+Q_CORE_EXPORT
+bool QBasicAtomicInt_testAndSetOrdered(volatile int *_q_value, int expectedValue, int newValue)
+{
+ return static_cast<bool>(__e32_atomic_cas_ord32(_q_value,
+ reinterpret_cast<TUint32*>(&expectedValue), newValue));
+}
+
+Q_CORE_EXPORT
+bool QBasicAtomicInt_testAndSetRelaxed(volatile int *_q_value, int expectedValue, int newValue)
+{
+ return static_cast<bool>(__e32_atomic_cas_rlx32(_q_value,
+ reinterpret_cast<TUint32*>(&expectedValue), newValue));
+}
+
+Q_CORE_EXPORT
+bool QBasicAtomicInt_testAndSetAcquire(volatile int *_q_value, int expectedValue, int newValue)
+{
+ return static_cast<bool>(__e32_atomic_cas_acq32(_q_value,
+ reinterpret_cast<TUint32*>(&expectedValue), newValue));
+}
+
+Q_CORE_EXPORT
+bool QBasicAtomicInt_testAndSetRelease(volatile int *_q_value, int expectedValue, int newValue)
+{
+ return static_cast<bool>(__e32_atomic_cas_rel32(_q_value,
+ reinterpret_cast<TUint32*>(&expectedValue), newValue));
+}
+
+Q_CORE_EXPORT
+int QBasicAtomicInt_fetchAndStoreOrdered(volatile int *_q_value, int newValue)
+{
+ return static_cast<int>(__e32_atomic_swp_ord32(_q_value, newValue));
+}
+
+Q_CORE_EXPORT
+int QBasicAtomicInt_fetchAndStoreRelaxed(volatile int *_q_value, int newValue)
+{
+ return static_cast<int>(__e32_atomic_swp_rlx32(_q_value, newValue));
+}
+
+Q_CORE_EXPORT
+int QBasicAtomicInt_fetchAndStoreAcquire(volatile int *_q_value, int newValue)
+{
+ return static_cast<int>(__e32_atomic_swp_acq32(_q_value, newValue));
+}
+
+Q_CORE_EXPORT
+int QBasicAtomicInt_fetchAndStoreRelease(volatile int *_q_value, int newValue)
+{
+ return static_cast<int>(__e32_atomic_swp_rel32(_q_value, newValue));
+}
+
+Q_CORE_EXPORT
+int QBasicAtomicInt_fetchAndAddOrdered(volatile int *_q_value, int valueToAdd)
+{
+ return static_cast<int>(__e32_atomic_add_ord32(_q_value, valueToAdd));
+}
+
+Q_CORE_EXPORT
+int QBasicAtomicInt_fetchAndAddRelaxed(volatile int *_q_value, int valueToAdd)
+{
+ return static_cast<int>(__e32_atomic_add_rlx32(_q_value, valueToAdd));
+}
+
+Q_CORE_EXPORT
+int QBasicAtomicInt_fetchAndAddAcquire(volatile int *_q_value, int valueToAdd)
+{
+ return static_cast<int>(__e32_atomic_add_acq32(_q_value, valueToAdd));
+}
+
+Q_CORE_EXPORT
+int QBasicAtomicInt_fetchAndAddRelease(volatile int *_q_value, int valueToAdd)
+{
+ return static_cast<int>(__e32_atomic_add_rel32(_q_value, valueToAdd));
+}
+
+Q_CORE_EXPORT
+bool QBasicAtomicPointer_testAndSetOrdered(void * volatile *_q_value,
+ void *expectedValue,
+ void *newValue)
+{
+ return static_cast<bool>(__e32_atomic_cas_ord_ptr(_q_value,
+ &expectedValue,
+ newValue));
+}
+
+Q_CORE_EXPORT
+bool QBasicAtomicPointer_testAndSetRelaxed(void * volatile *_q_value,
+ void *expectedValue,
+ void *newValue)
+{
+ return static_cast<bool>(__e32_atomic_cas_rlx_ptr(_q_value,
+ &expectedValue,
+ newValue));
+}
+
+Q_CORE_EXPORT
+bool QBasicAtomicPointer_testAndSetAcquire(void * volatile *_q_value,
+ void *expectedValue,
+ void *newValue)
+{
+ return static_cast<bool>(__e32_atomic_cas_acq_ptr(_q_value,
+ &expectedValue,
+ newValue));
+}
+
+Q_CORE_EXPORT
+bool QBasicAtomicPointer_testAndSetRelease(void * volatile *_q_value,
+ void *expectedValue,
+ void *newValue)
+{
+ return static_cast<bool>(__e32_atomic_cas_rel_ptr(_q_value,
+ &expectedValue,
+ newValue));
+}
+
+Q_CORE_EXPORT
+void *QBasicAtomicPointer_fetchAndStoreOrdered(void * volatile *_q_value, void *newValue)
+{
+ return __e32_atomic_swp_ord_ptr(_q_value, newValue);
+}
+
+Q_CORE_EXPORT
+void *QBasicAtomicPointer_fetchAndStoreRelaxed(void * volatile *_q_value, void *newValue)
+{
+ return __e32_atomic_swp_rlx_ptr(_q_value, newValue);
+}
+
+Q_CORE_EXPORT
+void *QBasicAtomicPointer_fetchAndStoreAcquire(void * volatile *_q_value, void *newValue)
+{
+ return __e32_atomic_swp_acq_ptr(_q_value, newValue);
+}
+
+Q_CORE_EXPORT
+void *QBasicAtomicPointer_fetchAndStoreRelease(void * volatile *_q_value, void *newValue)
+{
+ return __e32_atomic_swp_rel_ptr(_q_value, newValue);
+}
+
+Q_CORE_EXPORT
+void *QBasicAtomicPointer_fetchAndAddOrdered(void * volatile *_q_value, qptrdiff valueToAdd)
+{
+ return __e32_atomic_add_ord_ptr(_q_value, valueToAdd);
+}
+
+Q_CORE_EXPORT
+void *QBasicAtomicPointer_fetchAndAddRelaxed(void * volatile *_q_value, qptrdiff valueToAdd)
+{
+ return __e32_atomic_add_rlx_ptr(_q_value, valueToAdd);
+}
+
+Q_CORE_EXPORT
+void *QBasicAtomicPointer_fetchAndAddAcquire(void * volatile *_q_value, qptrdiff valueToAdd)
+{
+ return __e32_atomic_add_acq_ptr(_q_value, valueToAdd);
+}
+
+Q_CORE_EXPORT
+void *QBasicAtomicPointer_fetchAndAddRelease(void * volatile *_q_value, qptrdiff valueToAdd)
+{
+ return __e32_atomic_add_rel_ptr(_q_value, valueToAdd);
+}
+
+#else
+//Symbian kernels 9.4 and earlier don't expose a suitable API
+
+//For ARMv6, the generic atomics are machine coded
+#ifndef QT_HAVE_ARMV6
+
+class QCriticalSection
+{
+public:
+ QCriticalSection() { fastlock.CreateLocal(); }
+ ~QCriticalSection() { fastlock.Close(); }
+ void lock() { fastlock.Wait(); }
+ void unlock() { fastlock.Signal(); }
+
+private:
+ RFastLock fastlock;
+};
+
+QCriticalSection qAtomicCriticalSection;
+
+Q_CORE_EXPORT
+bool QBasicAtomicInt_testAndSetOrdered(volatile int *_q_value, int expectedValue, int newValue)
+{
+ bool returnValue = false;
+ qAtomicCriticalSection.lock();
+ if (*_q_value == expectedValue) {
+ *_q_value = newValue;
+ returnValue = true;
+ }
+ qAtomicCriticalSection.unlock();
+ return returnValue;
+}
+
+Q_CORE_EXPORT
+int QBasicAtomicInt_fetchAndStoreOrdered(volatile int *_q_value, int newValue)
+{
+ int returnValue;
+ qAtomicCriticalSection.lock();
+ returnValue = *_q_value;
+ *_q_value = newValue;
+ qAtomicCriticalSection.unlock();
+ return returnValue;
+}
+
+Q_CORE_EXPORT
+int QBasicAtomicInt_fetchAndAddOrdered(volatile int *_q_value, int valueToAdd)
+{
+ int returnValue;
+ qAtomicCriticalSection.lock();
+ returnValue = *_q_value;
+ *_q_value += valueToAdd;
+ qAtomicCriticalSection.unlock();
+ return returnValue;
+}
+
+Q_CORE_EXPORT
+bool QBasicAtomicPointer_testAndSetOrdered(void * volatile *_q_value,
+ void *expectedValue,
+ void *newValue)
+{
+ bool returnValue = false;
+ qAtomicCriticalSection.lock();
+ if (*_q_value == expectedValue) {
+ *_q_value = newValue;
+ returnValue = true;
+ }
+ qAtomicCriticalSection.unlock();
+ return returnValue;
+}
+
+Q_CORE_EXPORT
+void *QBasicAtomicPointer_fetchAndStoreOrdered(void * volatile *_q_value, void *newValue)
+{
+ void *returnValue;
+ qAtomicCriticalSection.lock();
+ returnValue = *_q_value;
+ *_q_value = newValue;
+ qAtomicCriticalSection.unlock();
+ return returnValue;
+}
+
+Q_CORE_EXPORT
+void *QBasicAtomicPointer_fetchAndAddOrdered(void * volatile *_q_value, qptrdiff valueToAdd)
+{
+ void *returnValue;
+ qAtomicCriticalSection.lock();
+ returnValue = *_q_value;
+ *_q_value = reinterpret_cast<char *>(returnValue) + valueToAdd;
+ qAtomicCriticalSection.unlock();
+ return returnValue;
+}
+
+Q_CORE_EXPORT
+bool QBasicAtomicInt_testAndSetRelaxed(volatile int *_q_value, int expectedValue, int newValue)
+{
+ return QBasicAtomicInt_testAndSetOrdered(_q_value, expectedValue, newValue);
+}
+
+Q_CORE_EXPORT
+bool QBasicAtomicInt_testAndSetAcquire(volatile int *_q_value, int expectedValue, int newValue)
+{
+ return QBasicAtomicInt_testAndSetOrdered(_q_value, expectedValue, newValue);
+}
+
+Q_CORE_EXPORT
+bool QBasicAtomicInt_testAndSetRelease(volatile int *_q_value, int expectedValue, int newValue)
+{
+ return QBasicAtomicInt_testAndSetOrdered(_q_value, expectedValue, newValue);
+}
+
+Q_CORE_EXPORT
+int QBasicAtomicInt_fetchAndStoreRelaxed(volatile int *_q_value, int newValue)
+{
+ return QBasicAtomicInt_fetchAndStoreOrdered(_q_value, newValue);
+}
+
+Q_CORE_EXPORT
+int QBasicAtomicInt_fetchAndStoreAcquire(volatile int *_q_value, int newValue)
+{
+ return QBasicAtomicInt_fetchAndStoreOrdered(_q_value, newValue);
+}
+
+Q_CORE_EXPORT
+int QBasicAtomicInt_fetchAndStoreRelease(volatile int *_q_value, int newValue)
+{
+ return QBasicAtomicInt_fetchAndStoreOrdered(_q_value, newValue);
+}
+
+Q_CORE_EXPORT
+int QBasicAtomicInt_fetchAndAddRelaxed(volatile int *_q_value, int valueToAdd)
+{
+ return QBasicAtomicInt_fetchAndAddOrdered(_q_value, valueToAdd);
+}
+
+Q_CORE_EXPORT
+int QBasicAtomicInt_fetchAndAddAcquire(volatile int *_q_value, int valueToAdd)
+{
+ return QBasicAtomicInt_fetchAndAddOrdered(_q_value, valueToAdd);
+}
+
+Q_CORE_EXPORT
+int QBasicAtomicInt_fetchAndAddRelease(volatile int *_q_value, int valueToAdd)
+{
+ return QBasicAtomicInt_fetchAndAddOrdered(_q_value, valueToAdd);
+}
+
+Q_CORE_EXPORT
+bool QBasicAtomicPointer_testAndSetRelaxed(void * volatile *_q_value,
+ void *expectedValue,
+ void *newValue)
+{
+ return QBasicAtomicPointer_testAndSetOrdered(_q_value, expectedValue, newValue);
+}
+
+Q_CORE_EXPORT
+bool QBasicAtomicPointer_testAndSetAcquire(void * volatile *_q_value,
+ void *expectedValue,
+ void *newValue)
+{
+ return QBasicAtomicPointer_testAndSetOrdered(_q_value, expectedValue, newValue);
+}
+
+Q_CORE_EXPORT
+bool QBasicAtomicPointer_testAndSetRelease(void * volatile *_q_value,
+ void *expectedValue,
+ void *newValue)
+{
+ return QBasicAtomicPointer_testAndSetOrdered(_q_value, expectedValue, newValue);
+}
+
+Q_CORE_EXPORT
+void *QBasicAtomicPointer_fetchAndStoreRelaxed(void * volatile *_q_value, void *newValue)
+{
+ return QBasicAtomicPointer_fetchAndStoreOrdered(_q_value, newValue);
+}
+
+Q_CORE_EXPORT
+void *QBasicAtomicPointer_fetchAndStoreAcquire(void * volatile *_q_value, void *newValue)
+{
+ return QBasicAtomicPointer_fetchAndStoreOrdered(_q_value, newValue);
+}
+
+Q_CORE_EXPORT
+void *QBasicAtomicPointer_fetchAndStoreRelease(void * volatile *_q_value, void *newValue)
+{
+ return QBasicAtomicPointer_fetchAndStoreOrdered(_q_value, newValue);
+}
+
+Q_CORE_EXPORT
+void *QBasicAtomicPointer_fetchAndAddRelaxed(void * volatile *_q_value, qptrdiff valueToAdd)
+{
+ return QBasicAtomicPointer_fetchAndAddOrdered(_q_value, valueToAdd);
+}
+
+Q_CORE_EXPORT
+void *QBasicAtomicPointer_fetchAndAddAcquire(void * volatile *_q_value, qptrdiff valueToAdd)
+{
+ return QBasicAtomicPointer_fetchAndAddOrdered(_q_value, valueToAdd);
+}
+
+Q_CORE_EXPORT
+void *QBasicAtomicPointer_fetchAndAddRelease(void * volatile *_q_value, qptrdiff valueToAdd)
+{
+ return QBasicAtomicPointer_fetchAndAddOrdered(_q_value, valueToAdd);
+}
+
+#endif // QT_HAVE_ARMV6
+#endif // SYMBIAN_E32_ATOMIC_API
+
+QT_END_NAMESPACE
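A minimal usage sketch, not part of the patch: the __e32_atomic-backed build above and the QCriticalSection fallback export identical C entry points, so a caller can rely on the ordered test-and-set for one-shot initialisation regardless of which branch was compiled. The flag and function names below are illustrative only.

    // Illustrative only -- uses the QBasicAtomicInt_* functions exported by this file.
    static volatile int initialized = 0;

    void ensureInitializedOnce()
    {
        // The first thread to flip 0 -> 1 wins and performs the one-time setup.
        if (QBasicAtomicInt_testAndSetOrdered(&initialized, 0, 1)) {
            // ... one-time setup goes here ...
        }
    }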
diff --git a/src/corelib/arch/symbian/qt_heapsetup_symbian.cpp b/src/corelib/arch/symbian/qt_heapsetup_symbian.cpp
new file mode 100644
index 0000000000..27ed4bcd10
--- /dev/null
+++ b/src/corelib/arch/symbian/qt_heapsetup_symbian.cpp
@@ -0,0 +1,105 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#include "qt_hybridheap_symbian_p.h"
+
+#ifdef QT_USE_NEW_SYMBIAN_ALLOCATOR
+
+extern const TInt KHeapShrinkHysRatio = 0x800;
+
+/*
+ * \internal
+ * Called from the qtmain.lib application wrapper.
+ * Create a new heap as requested, but use the new (backported) allocator.
+ */
+Q_CORE_EXPORT TInt qt_symbian_SetupThreadHeap(TBool aNotFirst, SStdEpocThreadCreateInfo& aInfo)
+{
+ TInt r = KErrNone;
+ if (!aInfo.iAllocator && aInfo.iHeapInitialSize>0)
+ {
+ // new heap required
+ RHeap* pH = NULL;
+ r = UserHeap::CreateThreadHeap(aInfo, pH);
+ }
+ else if (aInfo.iAllocator)
+ {
+ // sharing a heap
+ RAllocator* pA = aInfo.iAllocator;
+ pA->Open();
+ User::SwitchAllocator(pA);
+ }
+ return r;
+}
+
+#ifndef NO_NAMED_LOCAL_CHUNKS
+void TChunkCreateInfo::SetThreadHeap(TInt aInitialSize, TInt aMaxSize, const TDesC& aName)
+{
+ iType = TChunkCreate::ENormal | TChunkCreate::EData;
+ iMaxSize = aMaxSize;
+ iInitialBottom = 0;
+ iInitialTop = aInitialSize;
+ iAttributes |= TChunkCreate::ELocalNamed;
+ iName = &aName;
+ iOwnerType = EOwnerThread;
+}
+#endif // NO_NAMED_LOCAL_CHUNKS
+
+void Panic(TCdtPanic reason)
+{
+ _LIT(KCat, "QtHybridHeap");
+ User::Panic(KCat, reason);
+}
+
+#else /* QT_USE_NEW_SYMBIAN_ALLOCATOR */
+
+#include <e32std.h>
+
+/*
+ * \internal
+ * Called from the qtmain.lib application wrapper.
+ * Create a new heap as requested, using the default system allocator
+ */
+Q_CORE_EXPORT TInt qt_symbian_SetupThreadHeap(TBool aNotFirst, SStdEpocThreadCreateInfo& aInfo)
+{
+ return UserHeap::SetupThreadHeap(aNotFirst, aInfo);
+}
+
+#endif /* QT_USE_NEW_SYMBIAN_ALLOCATOR */
diff --git a/src/corelib/arch/symbian/qt_hybridheap_symbian_p.h b/src/corelib/arch/symbian/qt_hybridheap_symbian_p.h
new file mode 100644
index 0000000000..d1ce7056f4
--- /dev/null
+++ b/src/corelib/arch/symbian/qt_hybridheap_symbian_p.h
@@ -0,0 +1,178 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef QT_HYBRIDHEAP_SYMBIAN_H
+#define QT_HYBRIDHEAP_SYMBIAN_H
+
+#include <qglobal.h>
+#include <e32cmn.h>
+
+#if !defined(__SYMBIAN_KERNEL_HYBRID_HEAP__) && !defined(__WINS__)
+// Enable the (backported) new allocator. Once it is available in the OS,
+// this flag should be disabled for that OS version onward.
+#define QT_USE_NEW_SYMBIAN_ALLOCATOR
+#endif
+
+#ifdef QT_USE_NEW_SYMBIAN_ALLOCATOR
+
+#ifdef Q_CC_RVCT
+#pragma push
+#pragma arm
+#pragma Otime
+#pragma O2
+#endif
+
+#include "common_p.h"
+#ifdef QT_SYMBIAN_HAVE_U32STD_H
+#include <u32std.h>
+#endif
+#ifdef QT_SYMBIAN_HAVE_E32BTRACE_H
+#include <e32btrace.h>
+// enables the BTrace instrumentation code to be compiled in
+#define ENABLE_BTRACE
+#endif
+#ifdef __KERNEL_MODE__
+#include <kernel/kern_priv.h>
+#endif
+#include "dla_p.h"
+#ifndef __KERNEL_MODE__
+#include "slab_p.h"
+#include "page_alloc_p.h"
+#endif
+#include "heap_hybrid_p.h"
+
+// disabling Symbian import/export macros to prevent heap_hybrid.cpp, copied from Symbian^4, from exporting symbols in ARM builds;
+// this minimises the code changes to heap_hybrid.cpp to ease future integration
+#undef UEXPORT_C
+#define UEXPORT_C
+#undef EXPORT_C
+#define EXPORT_C
+#undef IMPORT_D
+#define IMPORT_D
+
+// disabling code ported from Symbian^4 that we don't want, or can't have, on earlier platforms
+#define QT_SYMBIAN4_ALLOCATOR_UNWANTED_CODE
+
+#if defined(SYMBIAN_VERSION_9_1) || defined(SYMBIAN_VERSION_9_2) || defined(SYMBIAN_VERSION_9_3) || defined(SYMBIAN_VERSION_9_4) || defined(SYMBIAN_VERSION_SYMBIAN2)
+#define NO_NAMED_LOCAL_CHUNKS
+#endif
+
+// disabling the BTrace components of heap checking macros
+#ifndef ENABLE_BTRACE
+inline int noBTrace() {return 0;}
+#define BTraceContext12(a,b,c,d,e) noBTrace()
+#endif
+
+// declare ETHeapBadDebugFailParameter, where missing
+#define ETHeapBadDebugFailParameter ((TCdtPanic)213)
+
+#ifndef QT_SYMBIAN_HAVE_U32STD_H
+struct SThreadCreateInfo
+ {
+ TAny* iHandle;
+ TInt iType;
+ TThreadFunction iFunction;
+ TAny* iPtr;
+ TAny* iSupervisorStack;
+ TInt iSupervisorStackSize;
+ TAny* iUserStack;
+ TInt iUserStackSize;
+ TInt iInitialThreadPriority;
+ TPtrC iName;
+ TInt iTotalSize; // Size including any extras (must be a multiple of 8 bytes)
+ };
+
+struct SStdEpocThreadCreateInfo : public SThreadCreateInfo
+ {
+ RAllocator* iAllocator;
+ TInt iHeapInitialSize;
+ TInt iHeapMaxSize;
+ TInt iPadding; // Make structure size a multiple of 8 bytes
+ };
+
+class TChunkCreate
+ {
+public:
+ // Attributes for chunk creation that are used by both euser and the kernel
+ // by classes TChunkCreateInfo and SChunkCreateInfo, respectively.
+ enum TChunkCreateAtt
+ {
+ ENormal = 0x00000000,
+ EDoubleEnded = 0x00000001,
+ EDisconnected = 0x00000002,
+ ECache = 0x00000003,
+ EMappingMask = 0x0000000f,
+ ELocal = 0x00000000,
+ EGlobal = 0x00000010,
+ EData = 0x00000000,
+ ECode = 0x00000020,
+ EMemoryNotOwned = 0x00000040,
+
+ // Force local chunk to be named. Only required for thread heap
+ // chunks, all other local chunks should be nameless.
+ ELocalNamed = 0x00000080,
+
+ // Make global chunk read only to all processes but the controlling owner
+ EReadOnly = 0x00000100,
+
+ // Paging attributes for chunks.
+ EPagingUnspec = 0x00000000,
+ EPaged = 0x80000000,
+ EUnpaged = 0x40000000,
+ EPagingMask = EPaged | EUnpaged,
+
+ EChunkCreateAttMask = EMappingMask | EGlobal | ECode |
+ ELocalNamed | EReadOnly | EPagingMask,
+ };
+public:
+ TUint iAtt;
+ TBool iForceFixed;
+ TInt iInitialBottom;
+ TInt iInitialTop;
+ TInt iMaxSize;
+ TUint8 iClearByte;
+ };
+
+#endif // QT_SYMBIAN_HAVE_U32STD_H
+
+#endif /* QT_USE_NEW_SYMBIAN_ALLOCATOR */
+
+#endif /* QT_HYBRIDHEAP_SYMBIAN_H */
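For context, the fallback TChunkCreate attributes declared above combine the same way TChunkCreateInfo::SetThreadHeap() in qt_heapsetup_symbian.cpp describes the thread-heap chunk: a normal, local, named data chunk. A minimal sketch; threadHeapChunkAtt is an illustrative name, not used by the patch.

    // Illustrative only: attribute combination for a thread-heap chunk, matching
    // the fields set by TChunkCreateInfo::SetThreadHeap() earlier in this patch.
    const TUint threadHeapChunkAtt = TChunkCreate::ENormal
                                   | TChunkCreate::EData
                                   | TChunkCreate::ELocalNamed;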
diff --git a/src/corelib/arch/symbian/slab_p.h b/src/corelib/arch/symbian/slab_p.h
new file mode 100644
index 0000000000..2451bbf151
--- /dev/null
+++ b/src/corelib/arch/symbian/slab_p.h
@@ -0,0 +1,125 @@
+/****************************************************************************
+**
+** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
+** All rights reserved.
+** Contact: Nokia Corporation (qt-info@nokia.com)
+**
+** This file is part of the QtCore module of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:LGPL$
+** No Commercial Usage
+** This file contains pre-release code and may not be distributed.
+** You may use this file in accordance with the terms and conditions
+** contained in the Technology Preview License Agreement accompanying
+** this package.
+**
+** GNU Lesser General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU Lesser
+** General Public License version 2.1 as published by the Free Software
+** Foundation and appearing in the file LICENSE.LGPL included in the
+** packaging of this file. Please review the following information to
+** ensure the GNU Lesser General Public License version 2.1 requirements
+** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
+**
+** In addition, as a special exception, Nokia gives you certain additional
+** rights. These rights are described in the Nokia Qt LGPL Exception
+** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
+**
+** If you have questions regarding the use of this file, please contact
+** Nokia at qt-info@nokia.com.
+**
+**
+**
+**
+**
+**
+**
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+#ifndef __KERNEL_MODE__
+
+class slab;
+class slabhdr;
+#define MAXSLABSIZE 56
+#define PAGESHIFT 12
+#define PAGESIZE (1<<PAGESHIFT)
+#define SLABSHIFT 10
+#define SLABSIZE (1 << SLABSHIFT)
+#define CELLALIGN 8
+
+
+const unsigned slabfull = 0;
+const TInt slabsperpage = (int)(PAGESIZE/SLABSIZE);
+#define HIBIT(bits) (((unsigned)bits & 0xc) ? 2 + ((unsigned)bits>>3) : ((unsigned) bits>>1))
+
+#define LOWBIT(bits) (((unsigned) bits&3) ? 1 - ((unsigned)bits&1) : 3 - (((unsigned)bits>>2)&1))
+
+#define ZEROBITS(header) (((unsigned)header & 0x70000000) ? 0 : 1)
+
+class slabhdr
+{
+ public:
+ unsigned iHeader;
+ // made up of
+ // bits  |    31    | 30..28 | 27..18 | 17..12 |  11..8  |   7..0   |
+ //       +----------+--------+--------+--------+---------+----------+
+ // field | floating |  zero  | used-4 |  size  | pagemap | free pos |
+ //
+ slab** iParent; // reference to iParent's pointer to this slab in tree
+ slab* iChild1; // 1st iChild in tree
+ slab* iChild2; // 2nd iChild in tree
+};
+
+const TInt KMaxSlabPayload = SLABSIZE - sizeof(slabhdr);
+#define MAXUSEDM4BITS 0x0fc00000
+#define FLOATING_BIT 0x80000000
+
+inline unsigned HeaderFloating(unsigned h)
+{ return (h & 0x80000000); }
+const unsigned maxuse = (SLABSIZE - sizeof(slabhdr))>>2;
+const unsigned firstpos = sizeof(slabhdr)>>2;
+
+#ifdef _DEBUG
+#define CHECKTREE(x) DoCheckSlabTree(x,EFalse)
+#define CHECKSLAB(s,t,p) DoCheckSlab(s,t,p)
+#define CHECKSLABBFR(s,p) {TUint32 b[4]; BuildPartialSlabBitmap(b,s,p);}
+#else
+#define CHECKTREE(x) (void)0
+#define CHECKSLAB(s,t,p) (void)0
+#define CHECKSLABBFR(s,p) (void)0
+#endif
+
+class slabset
+{
+ public:
+ slab* iPartial;
+};
+
+class slab : public slabhdr
+{
+ public:
+ void Init(unsigned clz);
+ //static slab* SlabFor( void* p);
+ static slab* SlabFor(const void* p);
+ unsigned char iPayload[SLABSIZE-sizeof(slabhdr)];
+};
+
+class page
+{
+ public:
+ inline static page* PageFor(slab* s);
+ //slab iSlabs;
+ slab iSlabs[slabsperpage];
+};
+
+
+inline page* page::PageFor(slab* s)
+{
+ return reinterpret_cast<page*>((unsigned(s))&~(PAGESIZE-1));
+}
+
+
+#endif // __KERNEL_MODE__
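For reference, a decoding sketch of the slabhdr::iHeader bit layout documented above; the header* helpers are illustrative names, not part of the allocator.

    // Illustrative decoding of slabhdr::iHeader, following the layout comment above.
    inline unsigned headerFloating(unsigned h)   { return (h >> 31) & 0x1;   } // bit 31
    inline unsigned headerUsedMinus4(unsigned h) { return (h >> 18) & 0x3ff; } // bits 27..18
    inline unsigned headerSize(unsigned h)       { return (h >> 12) & 0x3f;  } // bits 17..12
    inline unsigned headerPagemap(unsigned h)    { return (h >> 8)  & 0xf;   } // bits 11..8
    inline unsigned headerFreePos(unsigned h)    { return h & 0xff;          } // bits 7..0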
diff --git a/src/corelib/arch/vxworks/arch.pri b/src/corelib/arch/vxworks/arch.pri
new file mode 100644
index 0000000000..a7686186b0
--- /dev/null
+++ b/src/corelib/arch/vxworks/arch.pri
@@ -0,0 +1,6 @@
+#
+# VxWorks generic
+#
+*-ppc-* {
+ SOURCES += qatomic_ppc.s
+}
diff --git a/src/corelib/arch/vxworks/qatomic_ppc.s b/src/corelib/arch/vxworks/qatomic_ppc.s
new file mode 100644
index 0000000000..03971e89e8
--- /dev/null
+++ b/src/corelib/arch/vxworks/qatomic_ppc.s
@@ -0,0 +1,415 @@
+
+ .align 2
+ .globl q_atomic_test_and_set_int
+ .globl .q_atomic_test_and_set_int
+q_atomic_test_and_set_int:
+.q_atomic_test_and_set_int:
+ lwarx 6,0,3
+ xor. 6,6,4
+ bne $+12
+ stwcx. 5,0,3
+ bne- $-16
+ subfic 3,6,0
+ adde 3,3,6
+ blr
+LT..q_atomic_test_and_set_int:
+ .long 0
+ .byte 0,9,32,64,0,0,3,0
+ .long 0
+ .long LT..q_atomic_test_and_set_int-.q_atomic_test_and_set_int
+ .short 25
+ .byte "q_atomic_test_and_set_int"
+ .align 2
+
+ .align 2
+ .globl q_atomic_test_and_set_acquire_int
+ .globl .q_atomic_test_and_set_acquire_int
+q_atomic_test_and_set_acquire_int:
+.q_atomic_test_and_set_acquire_int:
+ lwarx 6,0,3
+ xor. 6,6,4
+ bne $+16
+ stwcx. 5,0,3
+ bne- $-16
+ isync
+ subfic 3,6,0
+ adde 3,3,6
+ blr
+LT..q_atomic_test_and_set_acquire_int:
+ .long 0
+ .byte 0,9,32,64,0,0,3,0
+ .long 0
+ .long LT..q_atomic_test_and_set_acquire_int-.q_atomic_test_and_set_acquire_int
+ .short 33
+ .byte "q_atomic_test_and_set_acquire_int"
+ .align 2
+
+ .align 2
+ .globl q_atomic_test_and_set_release_int
+ .globl .q_atomic_test_and_set_release_int
+q_atomic_test_and_set_release_int:
+.q_atomic_test_and_set_release_int:
+ lwarx 6,0,3
+ xor. 6,6,4
+ bne $+12
+ stwcx. 5,0,3
+ bne- $-16
+ subfic 3,6,0
+ adde 3,3,6
+ blr
+LT..q_atomic_test_and_set_release_int:
+ .long 0
+ .byte 0,9,32,64,0,0,3,0
+ .long 0
+ .long LT..q_atomic_test_and_set_release_int-.q_atomic_test_and_set_release_int
+ .short 33
+ .byte "q_atomic_test_and_set_release_int"
+ .align 2
+
+ .align 2
+ .globl q_atomic_test_and_set_ptr
+ .globl .q_atomic_test_and_set_ptr
+q_atomic_test_and_set_ptr:
+.q_atomic_test_and_set_ptr:
+ lwarx 6,0,3
+ xor. 6,6,4
+ bne $+12
+ stwcx. 5,0,3
+ bne- $-16
+ subfic 3,6,0
+ adde 3,3,6
+ blr
+LT..q_atomic_test_and_set_ptr:
+ .long 0
+ .byte 0,9,32,64,0,0,3,0
+ .long 0
+ .long LT..q_atomic_test_and_set_ptr-.q_atomic_test_and_set_ptr
+ .short 25
+ .byte "q_atomic_test_and_set_ptr"
+ .align 2
+
+ .align 2
+ .globl q_atomic_test_and_set_acquire_ptr
+ .globl .q_atomic_test_and_set_acquire_ptr
+q_atomic_test_and_set_acquire_ptr:
+.q_atomic_test_and_set_acquire_ptr:
+ lwarx 6,0,3
+ xor. 6,6,4
+ bne $+16
+ stwcx. 5,0,3
+ bne- $-16
+ isync
+ subfic 3,6,0
+ adde 3,3,6
+ blr
+LT..q_atomic_test_and_set_acquire_ptr:
+ .long 0
+ .byte 0,9,32,64,0,0,3,0
+ .long 0
+ .long LT..q_atomic_test_and_set_acquire_ptr-.q_atomic_test_and_set_acquire_ptr
+ .short 33
+ .byte "q_atomic_test_and_set_acquire_ptr"
+ .align 2
+
+ .align 2
+ .globl q_atomic_test_and_set_release_ptr
+ .globl .q_atomic_test_and_set_release_ptr
+q_atomic_test_and_set_release_ptr:
+.q_atomic_test_and_set_release_ptr:
+ lwarx 6,0,3
+ xor. 6,6,4
+ bne $+12
+ stwcx. 5,0,3
+ bne- $-16
+ subfic 3,6,0
+ adde 3,3,6
+ blr
+LT..q_atomic_test_and_set_release_ptr:
+ .long 0
+ .byte 0,9,32,64,0,0,3,0
+ .long 0
+ .long LT..q_atomic_test_and_set_release_ptr-.q_atomic_test_and_set_release_ptr
+ .short 33
+ .byte "q_atomic_test_and_set_release_ptr"
+ .align 2
+
+ .align 2
+ .globl q_atomic_increment
+ .globl .q_atomic_increment
+q_atomic_increment:
+.q_atomic_increment:
+ lwarx 4,0,3
+ addi 4,4,1
+ stwcx. 4,0,3
+ bne- $-12
+ mr 3,4
+ blr
+LT..q_atomic_increment:
+ .long 0
+ .byte 0,9,32,64,0,0,1,0
+ .long 0
+ .long LT..q_atomic_increment-.q_atomic_increment
+ .short 18
+ .byte "q_atomic_increment"
+ .align 2
+
+ .align 2
+ .globl q_atomic_decrement
+ .globl .q_atomic_decrement
+q_atomic_decrement:
+.q_atomic_decrement:
+ lwarx 4,0,3
+ subi 4,4,1
+ stwcx. 4,0,3
+ bne- $-12
+ mr 3,4
+ blr
+LT..q_atomic_decrement:
+ .long 0
+ .byte 0,9,32,64,0,0,1,0
+ .long 0
+ .long LT..q_atomic_decrement-.q_atomic_decrement
+ .short 18
+ .byte "q_atomic_decrement"
+ .align 2
+
+ .align 2
+ .globl q_atomic_set_int
+ .globl .q_atomic_set_int
+q_atomic_set_int:
+.q_atomic_set_int:
+ lwarx 5,0,3
+ stwcx. 4,0,3
+ bne- $-8
+ mr 3,5
+ blr
+LT..q_atomic_set_int:
+ .long 0
+ .byte 0,9,32,64,0,0,2,0
+ .long 0
+ .long LT..q_atomic_set_int-.q_atomic_set_int
+ .short 16
+ .byte "q_atomic_set_int"
+ .align 2
+
+ .align 2
+ .globl q_atomic_fetch_and_store_acquire_int
+ .globl .q_atomic_fetch_and_store_acquire_int
+q_atomic_fetch_and_store_acquire_int:
+.q_atomic_fetch_and_store_acquire_int:
+ lwarx 5,0,3
+ stwcx. 4,0,3
+ bne- $-8
+ isync
+ mr 3,5
+ blr
+LT..q_atomic_fetch_and_store_acquire_int:
+ .long 0
+ .byte 0,9,32,64,0,0,2,0
+ .long 0
+ .long LT..q_atomic_fetch_and_store_acquire_int-.q_atomic_fetch_and_store_acquire_int
+ .short 36
+ .byte "q_atomic_fetch_and_store_acquire_int"
+ .align 2
+
+ .align 2
+ .globl q_atomic_fetch_and_store_release_int
+ .globl .q_atomic_fetch_and_store_release_int
+q_atomic_fetch_and_store_release_int:
+.q_atomic_fetch_and_store_release_int:
+ lwarx 5,0,3
+ stwcx. 4,0,3
+ bne- $-8
+ mr 3,5
+ blr
+LT..q_atomic_fetch_and_store_release_int:
+ .long 0
+ .byte 0,9,32,64,0,0,2,0
+ .long 0
+ .long LT..q_atomic_fetch_and_store_release_int-.q_atomic_fetch_and_store_release_int
+ .short 36
+ .byte "q_atomic_fetch_and_store_release_int"
+ .align 2
+
+ .align 2
+ .globl q_atomic_set_ptr
+ .globl .q_atomic_set_ptr
+q_atomic_set_ptr:
+.q_atomic_set_ptr:
+ lwarx 5,0,3
+ stwcx. 4,0,3
+ bne- $-8
+ mr 3,5
+ blr
+LT..q_atomic_set_ptr:
+ .long 0
+ .byte 0,9,32,64,0,0,2,0
+ .long 0
+ .long LT..q_atomic_set_ptr-.q_atomic_set_ptr
+ .short 16
+ .byte "q_atomic_set_ptr"
+ .align 2
+
+ .align 2
+ .globl q_atomic_fetch_and_store_acquire_ptr
+ .globl .q_atomic_fetch_and_store_acquire_ptr
+q_atomic_fetch_and_store_acquire_ptr:
+.q_atomic_fetch_and_store_acquire_ptr:
+ lwarx 5,0,3
+ stwcx. 4,0,3
+ bne- $-8
+ isync
+ mr 3,5
+ blr
+LT..q_atomic_fetch_and_store_acquire_ptr:
+ .long 0
+ .byte 0,9,32,64,0,0,2,0
+ .long 0
+ .long LT..q_atomic_fetch_and_store_acquire_ptr-.q_atomic_fetch_and_store_acquire_ptr
+ .short 36
+ .byte "q_atomic_fetch_and_store_acquire_ptr"
+ .align 2
+
+ .align 2
+ .globl q_atomic_fetch_and_store_release_ptr
+ .globl .q_atomic_fetch_and_store_release_ptr
+q_atomic_fetch_and_store_release_ptr:
+.q_atomic_fetch_and_store_release_ptr:
+ lwarx 5,0,3
+ stwcx. 4,0,3
+ bne- $-8
+ mr 3,5
+ blr
+LT..q_atomic_fetch_and_store_release_ptr:
+ .long 0
+ .byte 0,9,32,64,0,0,2,0
+ .long 0
+ .long LT..q_atomic_fetch_and_store_release_ptr-.q_atomic_fetch_and_store_release_ptr
+ .short 36
+ .byte "q_atomic_fetch_and_store_release_ptr"
+ .align 2
+
+ .align 2
+ .globl q_atomic_fetch_and_add_int
+ .globl .q_atomic_fetch_and_add_int
+q_atomic_fetch_and_add_int:
+.q_atomic_fetch_and_add_int:
+ lwarx 5,0,3
+ add 6,4,5
+ stwcx. 6,0,3
+ bne- $-12
+ mr 3,5
+ blr
+LT..q_atomic_fetch_and_add_int:
+ .long 0
+ .byte 0,9,32,64,0,0,1,0
+ .long 0
+ .long LT..q_atomic_fetch_and_add_int-.q_atomic_fetch_and_add_int
+ .short 26
+ .byte "q_atomic_fetch_and_add_int"
+ .align 2
+
+ .align 2
+ .globl q_atomic_fetch_and_add_acquire_int
+ .globl .q_atomic_fetch_and_add_acquire_int
+q_atomic_fetch_and_add_acquire_int:
+.q_atomic_fetch_and_add_acquire_int:
+ lwarx 5,0,3
+ add 6,4,5
+ stwcx. 6,0,3
+ bne- $-12
+ isync
+ mr 3,5
+ blr
+LT..q_atomic_fetch_and_add_acquire_int:
+ .long 0
+ .byte 0,9,32,64,0,0,1,0
+ .long 0
+ .long LT..q_atomic_fetch_and_add_acquire_int-.q_atomic_fetch_and_add_acquire_int
+ .short 34
+ .byte "q_atomic_fetch_and_add_acquire_int"
+ .align 2
+
+ .align 2
+ .globl q_atomic_fetch_and_add_release_int
+ .globl .q_atomic_fetch_and_add_release_int
+q_atomic_fetch_and_add_release_int:
+.q_atomic_fetch_and_add_release_int:
+ lwarx 5,0,3
+ add 6,4,5
+ stwcx. 6,0,3
+ bne- $-12
+ mr 3,5
+ blr
+LT..q_atomic_fetch_and_add_release_int:
+ .long 0
+ .byte 0,9,32,64,0,0,1,0
+ .long 0
+ .long LT..q_atomic_fetch_and_add_release_int-.q_atomic_fetch_and_add_release_int
+ .short 34
+ .byte "q_atomic_fetch_and_add_release_int"
+ .align 2
+
+ .align 2
+ .globl q_atomic_fetch_and_add_ptr
+ .globl .q_atomic_fetch_and_add_ptr
+q_atomic_fetch_and_add_ptr:
+.q_atomic_fetch_and_add_ptr:
+ lwarx 5,0,3
+ add 6,4,5
+ stwcx. 6,0,3
+ bne- $-12
+ mr 3,5
+ blr
+LT..q_atomic_fetch_and_add_ptr:
+ .long 0
+ .byte 0,9,32,64,0,0,1,0
+ .long 0
+ .long LT..q_atomic_fetch_and_add_ptr-.q_atomic_fetch_and_add_ptr
+ .short 26
+ .byte "q_atomic_fetch_and_add_ptr"
+ .align 2
+
+ .align 2
+ .globl q_atomic_fetch_and_add_acquire_ptr
+ .globl .q_atomic_fetch_and_add_acquire_ptr
+q_atomic_fetch_and_add_acquire_ptr:
+.q_atomic_fetch_and_add_acquire_ptr:
+ lwarx 5,0,3
+ add 6,4,5
+ stwcx. 6,0,3
+ bne- $-12
+ isync
+ mr 3,5
+ blr
+LT..q_atomic_fetch_and_add_acquire_ptr:
+ .long 0
+ .byte 0,9,32,64,0,0,1,0
+ .long 0
+ .long LT..q_atomic_fetch_and_add_acquire_ptr-.q_atomic_fetch_and_add_acquire_ptr
+ .short 34
+ .byte "q_atomic_fetch_and_add_acquire_ptr"
+ .align 2
+
+ .align 2
+ .globl q_atomic_fetch_and_add_release_ptr
+ .globl .q_atomic_fetch_and_add_release_ptr
+q_atomic_fetch_and_add_release_ptr:
+.q_atomic_fetch_and_add_release_ptr:
+ lwarx 5,0,3
+ add 6,4,5
+ stwcx. 6,0,3
+ bne- $-12
+ mr 3,5
+ blr
+LT..q_atomic_fetch_and_add_release_ptr:
+ .long 0
+ .byte 0,9,32,64,0,0,1,0
+ .long 0
+ .long LT..q_atomic_fetch_and_add_release_ptr-.q_atomic_fetch_and_add_release_ptr
+ .short 34
+ .byte "q_atomic_fetch_and_add_release_ptr"
+ .align 2
+
+_section_.text:
+ .long _section_.text
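A behavioural sketch, not the shipped code: every lwarx/stwcx. sequence above is a load-reserved/store-conditional retry loop. For q_atomic_test_and_set_int the effect corresponds roughly to the C++11 compare-exchange below; the _acquire variants additionally execute isync after a successful store. testAndSetEquivalent is an illustrative name.

    #include <atomic>

    // Rough C++11 equivalent of q_atomic_test_and_set_int (sketch only):
    // succeed, and store newValue, only if *ptr still holds expectedValue.
    int testAndSetEquivalent(std::atomic<int> *ptr, int expectedValue, int newValue)
    {
        return ptr->compare_exchange_strong(expectedValue, newValue,
                                            std::memory_order_relaxed) ? 1 : 0;
    }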
diff --git a/src/corelib/arch/windows/arch.pri b/src/corelib/arch/windows/arch.pri
new file mode 100644
index 0000000000..d42374b98f
--- /dev/null
+++ b/src/corelib/arch/windows/arch.pri
@@ -0,0 +1,3 @@
+#
+# Windows architecture
+#
diff --git a/src/corelib/arch/x86_64/arch.pri b/src/corelib/arch/x86_64/arch.pri
new file mode 100644
index 0000000000..4145b7b133
--- /dev/null
+++ b/src/corelib/arch/x86_64/arch.pri
@@ -0,0 +1,4 @@
+#
+# AMD64 architecture
+#
+solaris-cc*:SOURCES += $$QT_ARCH_CPP/qatomic_sun.s
diff --git a/src/corelib/arch/x86_64/qatomic_sun.s b/src/corelib/arch/x86_64/qatomic_sun.s
new file mode 100644
index 0000000000..37969e61cb
--- /dev/null
+++ b/src/corelib/arch/x86_64/qatomic_sun.s
@@ -0,0 +1,91 @@
+ .code64
+
+ .globl q_atomic_increment
+ .type q_atomic_increment,@function
+ .section .text, "ax"
+ .align 16
+q_atomic_increment:
+ lock
+ incl (%rdi)
+ setne %al
+ ret
+ .size q_atomic_increment,.-q_atomic_increment
+
+ .globl q_atomic_decrement
+ .type q_atomic_decrement,@function
+ .section .text, "ax"
+ .align 16
+q_atomic_decrement:
+ lock
+ decl (%rdi)
+ setne %al
+ ret
+ .size q_atomic_decrement,.-q_atomic_decrement
+
+ .globl q_atomic_test_and_set_int
+ .type q_atomic_test_and_set_int, @function
+ .section .text, "ax"
+ .align 16
+q_atomic_test_and_set_int:
+ movl %esi,%eax
+ lock
+ cmpxchgl %edx,(%rdi)
+ movl $0,%eax
+ sete %al
+ ret
+ .size q_atomic_test_and_set_int, . - q_atomic_test_and_set_int
+
+ .globl q_atomic_set_int
+ .type q_atomic_set_int,@function
+ .section .text, "ax"
+ .align 16
+q_atomic_set_int:
+ xchgl %esi,(%rdi)
+ movl %esi,%eax
+ ret
+ .size q_atomic_set_int,.-q_atomic_set_int
+
+ .globl q_atomic_fetch_and_add_int
+ .type q_atomic_fetch_and_add_int,@function
+ .section .text, "ax"
+ .align 16
+q_atomic_fetch_and_add_int:
+ lock
+ xaddl %esi,(%rdi)
+ movl %esi, %eax
+ ret
+ .size q_atomic_fetch_and_add_int,.-q_atomic_fetch_and_add_int
+
+ .globl q_atomic_test_and_set_ptr
+ .type q_atomic_test_and_set_ptr, @function
+ .section .text, "ax"
+ .align 16
+q_atomic_test_and_set_ptr:
+ movq %rsi,%rax
+ lock
+ cmpxchgq %rdx,(%rdi)
+ movq $0, %rax
+ sete %al
+ ret
+ .size q_atomic_test_and_set_ptr, . - q_atomic_test_and_set_ptr
+
+ .globl q_atomic_set_ptr
+ .type q_atomic_set_ptr,@function
+ .section .text, "ax"
+ .align 16
+q_atomic_set_ptr:
+ xchgq %rsi,(%rdi)
+ movq %rsi,%rax
+ ret
+ .size q_atomic_set_ptr,.-q_atomic_set_ptr
+
+ .globl q_atomic_fetch_and_add_ptr
+ .type q_atomic_fetch_and_add_ptr,@function
+ .section .text, "ax"
+ .align 16
+q_atomic_fetch_and_add_ptr:
+ lock
+ xaddq %rsi,(%rdi)
+ movq %rsi,%rax
+ ret
+ .size q_atomic_fetch_and_add_ptr,.-q_atomic_fetch_and_add_ptr
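A behavioural sketch, not the shipped code: the lock-prefixed instructions above are full atomic read-modify-write operations. In C++11 terms the fetch-and-add and fetch-and-store entry points correspond roughly to the helpers below; the helper names are illustrative.

    #include <atomic>

    // Sketch of q_atomic_fetch_and_add_int: lock xaddl returns the value before the add.
    int fetchAndAddEquivalent(std::atomic<int> *ptr, int valueToAdd)
    {
        return ptr->fetch_add(valueToAdd);
    }

    // Sketch of q_atomic_set_int: xchgl is implicitly locked and returns the old value.
    int fetchAndStoreEquivalent(std::atomic<int> *ptr, int newValue)
    {
        return ptr->exchange(newValue);
    }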